diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 2e61f36f47..3a1f9869dc 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -9,6 +9,7 @@ on:
   push:
     branches:
       - master
+      - 'release-v**'
 env:
   GO_VERSION: "1.22"
   PRIORITIES: "P0"
@@ -32,8 +33,7 @@ jobs:
         component:
           - datacatalog
           - flyteadmin
-          # TODO(monorepo): Enable lint flytecopilot
-          # - flytecopilot
+          - flytecopilot
           - flytectl
           - flyteidl
           - flyteplugins
diff --git a/.github/workflows/flytectl-install.yml b/.github/workflows/flytectl-install.yml
index cbdb3795e9..3d56a805e2 100644
--- a/.github/workflows/flytectl-install.yml
+++ b/.github/workflows/flytectl-install.yml
@@ -8,6 +8,7 @@ on:
   pull_request:
     paths:
       - flytectl/**
+      - 'release-v**'
   push:
     branches:
       - master
diff --git a/.github/workflows/flyteidl-buf-publish.yml b/.github/workflows/flyteidl-buf-publish.yml
index f11bf3d44a..aef8c94e28 100644
--- a/.github/workflows/flyteidl-buf-publish.yml
+++ b/.github/workflows/flyteidl-buf-publish.yml
@@ -6,6 +6,7 @@ on:
       - artifacts-shell-2
       - artifacts
       - master
+      - 'release-v**'
     paths:
       - 'flyteidl/**'
 jobs:
diff --git a/.github/workflows/flyteidl-checks.yml b/.github/workflows/flyteidl-checks.yml
index f8a1d0f4d2..781b173e40 100644
--- a/.github/workflows/flyteidl-checks.yml
+++ b/.github/workflows/flyteidl-checks.yml
@@ -9,6 +9,7 @@ on:
   push:
     branches:
       - master
+      - 'release-v**'
 env:
   GO_VERSION: "1.22"
 jobs:
diff --git a/.github/workflows/helm-charts.yaml b/.github/workflows/helm-charts.yaml
index 63e81adf3a..6c87eda61c 100644
--- a/.github/workflows/helm-charts.yaml
+++ b/.github/workflows/helm-charts.yaml
@@ -10,6 +10,7 @@ on:
     branches:
       - master
       - rc/*
+      - 'release-v**'
   workflow_dispatch:
 
 jobs:
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index fe2f8535af..506eca5ae9 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -59,7 +59,7 @@ jobs:
       - name: Before Build
         run: ${{ inputs.before-build }}
       - name: Build and Push Image
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
           context: .
           file: ${{ inputs.dockerfile }}
diff --git a/.github/workflows/sandbox.yml b/.github/workflows/sandbox.yml
index 0899ec83e6..1e18109277 100644
--- a/.github/workflows/sandbox.yml
+++ b/.github/workflows/sandbox.yml
@@ -53,7 +53,7 @@ jobs:
           username: "${{ secrets.FLYTE_BOT_USERNAME }}"
           password: "${{ secrets.FLYTE_BOT_PAT }}"
       - name: Build and push DIND Image
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/arm64, linux/amd64
diff --git a/.github/workflows/single-binary.yml b/.github/workflows/single-binary.yml
index 4f3fdea994..0f64525860 100644
--- a/.github/workflows/single-binary.yml
+++ b/.github/workflows/single-binary.yml
@@ -10,6 +10,7 @@ on:
     branches:
       - master
       - rc/*
+      - 'release-v**'
   workflow_dispatch:
 
 jobs:
@@ -72,7 +73,7 @@ jobs:
         run: |
           mkdir -p docker/sandbox-bundled/images/tar/{arm64,amd64}
       - name: Export ARM64 Image
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/arm64
@@ -83,7 +84,7 @@ jobs:
           file: Dockerfile
           outputs: type=docker,dest=docker/sandbox-bundled/images/tar/arm64/flyte-binary.tar
       - name: Export AMD64 Image
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64
@@ -107,7 +108,7 @@ jobs:
           password: "${{ secrets.FLYTE_BOT_PAT }}"
       - name: Build and push Image
         if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/arm64, linux/amd64
@@ -143,7 +144,7 @@ jobs:
           driver-opts: image=moby/buildkit:master
           buildkitd-flags: "--allow-insecure-entitlement security.insecure"
       - name: Build sandbox image for functional tests
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
           context: docker/sandbox-bundled
           load: true
@@ -239,7 +240,7 @@ jobs:
           username: "${{ secrets.FLYTE_BOT_USERNAME }}"
           password: "${{ secrets.FLYTE_BOT_PAT }}"
       - name: Build and push multi-arch image
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
           context: docker/sandbox-bundled
           allow: "security.insecure"
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 0571e60eea..8915d88fbe 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -8,6 +8,7 @@ on:
   push:
     branches:
       - master
+      - 'release-v**'
   pull_request:
 jobs:
   compile:
diff --git a/.github/workflows/validate-helm-charts.yaml b/.github/workflows/validate-helm-charts.yaml
index 8e40ffe8d7..1bf450a858 100644
--- a/.github/workflows/validate-helm-charts.yaml
+++ b/.github/workflows/validate-helm-charts.yaml
@@ -4,6 +4,7 @@ on:
   pull_request:
     branches:
       - master
+      - 'release-v**'
     paths:
       - "charts/**"
       - "deployment/**"
diff --git a/CODEOWNERS b/CODEOWNERS
index da2c5bdd5c..635c0d4326 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,3 +1,3 @@
 # Automatically request docs team for docs PR review
-/docs/ @neverett @ppiegaze
+/docs/ @ppiegaze
 /docs/deployment/ @davidmirror-ops
diff --git a/Dockerfile b/Dockerfile
index fc32351ebe..ead022d036 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,8 +5,8 @@ FROM ghcr.io/flyteorg/flyteconsole:${FLYTECONSOLE_VERSION} AS flyteconsole
 FROM --platform=${BUILDPLATFORM} golang:1.22-bookworm AS flytebuilder
 ARG TARGETARCH
-ENV GOARCH "${TARGETARCH}"
-ENV GOOS linux
+ENV GOARCH="${TARGETARCH}"
+ENV GOOS=linux
 
 WORKDIR /flyteorg/build
@@ -29,10 +29,10 @@ RUN --mount=type=cache,target=/root/.cache/go-build --mount=type=cache,target=/r
 FROM debian:bookworm-slim
 
 ARG FLYTE_VERSION
-ENV FLYTE_VERSION "${FLYTE_VERSION}"
+ENV FLYTE_VERSION="${FLYTE_VERSION}"
 
-ENV DEBCONF_NONINTERACTIVE_SEEN true
-ENV DEBIAN_FRONTEND noninteractive
+ENV DEBCONF_NONINTERACTIVE_SEEN=true
+ENV DEBIAN_FRONTEND=noninteractive
 
 # Install core packages
 RUN apt-get update && apt-get install --no-install-recommends --yes \
diff --git a/Dockerfile.datacatalog b/Dockerfile.datacatalog
index b6bfc8707d..80f683a40f 100644
--- a/Dockerfile.datacatalog
+++ b/Dockerfile.datacatalog
@@ -3,12 +3,12 @@ #
 # TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst
 
-FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 as builder
+FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 AS builder
 
 ARG TARGETARCH
-ENV GOARCH "${TARGETARCH}"
-ENV GOOS linux
+ENV GOARCH="${TARGETARCH}"
+ENV GOOS=linux
 
 RUN apk add git openssh-client make curl
diff --git a/Dockerfile.flyteadmin b/Dockerfile.flyteadmin
index 2fe21cccc2..9e33229959 100644
--- a/Dockerfile.flyteadmin
+++ b/Dockerfile.flyteadmin
@@ -3,11 +3,11 @@ #
 # TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst
 
-FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 as builder
+FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 AS builder
 
 ARG TARGETARCH
-ENV GOARCH "${TARGETARCH}"
-ENV GOOS linux
+ENV GOARCH="${TARGETARCH}"
+ENV GOOS=linux
 
 RUN apk add git openssh-client make curl
@@ -41,7 +41,7 @@ ENV PATH="/artifacts:${PATH}"
 
 # This will eventually move to centurylink/ca-certs:latest for minimum possible image size
 FROM alpine:3.18
-LABEL org.opencontainers.image.source https://github.com/flyteorg/flyteadmin
+LABEL org.opencontainers.image.source=https://github.com/flyteorg/flyteadmin
 
 COPY --from=builder /artifacts /bin
diff --git a/Dockerfile.flytecopilot b/Dockerfile.flytecopilot
index 44107f1097..66e1d2c295 100644
--- a/Dockerfile.flytecopilot
+++ b/Dockerfile.flytecopilot
@@ -3,12 +3,12 @@ #
 # TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst
 
-FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 as builder
+FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 AS builder
 
 ARG TARGETARCH
-ENV GOARCH "${TARGETARCH}"
-ENV GOOS linux
+ENV GOARCH="${TARGETARCH}"
+ENV GOOS=linux
 
 RUN apk add git openssh-client make curl
@@ -32,7 +32,7 @@ ENV PATH="/artifacts:${PATH}"
 
 # This will eventually move to centurylink/ca-certs:latest for minimum possible image size
 FROM alpine:3.18
-LABEL org.opencontainers.image.source https://github.com/lyft/flyteplugins
+LABEL org.opencontainers.image.source=https://github.com/lyft/flyteplugins
 
 COPY --from=builder /artifacts /bin
diff --git a/Dockerfile.flytepropeller b/Dockerfile.flytepropeller
index 058d78b219..4963a530a4 100644
--- a/Dockerfile.flytepropeller
+++ b/Dockerfile.flytepropeller
@@ -4,12 +4,12 @@
 # TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst
 
-FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 as builder
+FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 AS builder
 
 ARG TARGETARCH
-ENV GOARCH "${TARGETARCH}"
-ENV GOOS linux
+ENV GOARCH="${TARGETARCH}"
+ENV GOOS=linux
 
 RUN apk add git openssh-client make curl
@@ -33,7 +33,7 @@ ENV PATH="/artifacts:${PATH}"
 
 # This will eventually move to centurylink/ca-certs:latest for minimum possible image size
 FROM alpine:3.18
-LABEL org.opencontainers.image.source https://github.com/flyteorg/flytepropeller
+LABEL org.opencontainers.image.source=https://github.com/flyteorg/flytepropeller
 
 COPY --from=builder /artifacts /bin
diff --git a/Dockerfile.flytescheduler b/Dockerfile.flytescheduler
index b22d050fc9..a7119d0d9b 100644
--- a/Dockerfile.flytescheduler
+++ b/Dockerfile.flytescheduler
@@ -4,12 +4,12 @@
 # TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst
 
-FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 as builder
+FROM --platform=${BUILDPLATFORM} golang:1.22-alpine3.18 AS builder
 
 ARG TARGETARCH
-ENV GOARCH "${TARGETARCH}"
-ENV GOOS linux
+ENV GOARCH="${TARGETARCH}"
+ENV GOOS=linux
 
 RUN apk add git openssh-client make curl
@@ -36,7 +36,7 @@ ENV PATH="/artifacts:${PATH}"
 
 # This will eventually move to centurylink/ca-certs:latest for minimum possible image size
 FROM alpine:3.18
-LABEL org.opencontainers.image.source https://github.com/flyteorg/flyteadmin
+LABEL org.opencontainers.image.source=https://github.com/flyteorg/flyteadmin
 
 COPY --from=builder /artifacts /bin
diff --git a/Makefile b/Makefile
index a8ac961f02..eacc4c69ae 100644
--- a/Makefile
+++ b/Makefile
@@ -135,6 +135,7 @@ go-tidy:
 	make -C flyteplugins go-tidy
 	make -C flytestdlib go-tidy
 	make -C flytecopilot go-tidy
+	make -C flytectl go-tidy
 
 .PHONY: lint-helm-charts
 lint-helm-charts:
diff --git a/README.md b/README.md
index 3bf96ba0c6..fd209fa55a 100644
--- a/README.md
+++ b/README.md
@@ -157,9 +157,7 @@ There are many ways to get involved in Flyte, including:
 ### We :heart: our contributors
 
-
-[![953358](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/953358?v=4&w=50&h=50&mask=circle)](https://github.com/katrogan)[![37090125](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/37090125?v=4&w=50&h=50&mask=circle)](https://github.com/lyft-metaservice-3)[![7597118](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7597118?v=4&w=50&h=50&mask=circle)](https://github.com/matthewphsmith)[![27159](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27159?v=4&w=50&h=50&mask=circle)](https://github.com/EngHabu)[![29843943](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/29843943?v=4&w=50&h=50&mask=circle)](https://github.com/goreleaserbot)[![10830562](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10830562?v=4&w=50&h=50&mask=circle)](https://github.com/evalsocket)[![8888115](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8888115?v=4&w=50&h=50&mask=circle)](https://github.com/hamersaw)[![78108056](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/78108056?v=4&w=50&h=50&mask=circle)](https://github.com/flyte-bot)[![158892](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/158892?v=4&w=50&h=50&mask=circle)](https://github.com/honnix)[![18408237](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18408237?v=4&w=50&h=50&mask=circle)](https://github.com/anandswaminathan)[![2896568](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2896568?v=4&w=50&h=50&mask=circle)](https://github.com/wild-endeavor)[![1518524](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1518524?v=4&w=50&h=50&mask=circle)](https://github.com/bnsblue)[![37936015](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/37936015?v=4&w=50&h=50&mask=circle)](https://github.com/pingsutw)[![27724763](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27724763?v=4&w=50&h=50&mask=circle)](https://github.com/iaroslav-ciupin)[![16888709](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16888709?v=4&w=50&h=50&mask=circle)](https://github.com/kumare3)[![27777173](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27777173?v=4&w=50&h=50&mask=circle)](https://github.com/samhita-alla)[![452166](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/452166?v=4&w=50&h=50&mask=circle)](https://github.com/MorpheusXAUT)[![4748985](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4748985?v=4&w=50&h=50&mask=circle)](https://github.com/aliabbasjaffri)[![6562898](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6562898?v=4&w=50&h=50&mask=circle)](https://github.com/ckiosidis)[![6239450](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6239450?v=4&w=50&h=50&mask=circle)](https://github.com/mayitbeegh)[![8805803](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8805803?v=4&w=50&h=50&mask=circle)](https://github.com/alexlipa91)[![5032356](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5032356?v=4&w=50&h=50&mask=circle)](https://github.com/brucearctor)[![77798312](https://images.weserv.nl/?url=https://avatars.githubuserc
ontent.com/u/77798312?v=4&w=50&h=50&mask=circle)](https://github.com/pmahindrakar-oss)[![23062603](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/23062603?v=4&w=50&h=50&mask=circle)](https://github.com/Antaxify)[![653394](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/653394?v=4&w=50&h=50&mask=circle)](https://github.com/eapolinario)[![5725707](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5725707?v=4&w=50&h=50&mask=circle)](https://github.com/andrewwdye)[![8122852](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8122852?v=4&w=50&h=50&mask=circle)](https://github.com/ariefrahmansyah)[![10869815](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10869815?v=4&w=50&h=50&mask=circle)](https://github.com/jeevb)[![3880645](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3880645?v=4&w=50&h=50&mask=circle)](https://github.com/jonathanburns)[![3936213](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3936213?v=4&w=50&h=50&mask=circle)](https://github.com/lu4nm3)[![26174213](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26174213?v=4&w=50&h=50&mask=circle)](https://github.com/lyft-metaservice-2)[![9142716](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/9142716?v=4&w=50&h=50&mask=circle)](https://github.com/2uasimojo)[![5487021](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5487021?v=4&w=50&h=50&mask=circle)](https://github.com/veggiemonk)[![1815175](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1815175?v=4&w=50&h=50&mask=circle)](https://github.com/schottra)[![46989299](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/46989299?v=4&w=50&h=50&mask=circle)](https://github.com/supreeth7)[![2816689](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2816689?v=4&w=50&h=50&mask=circle)](https://github.com/cosmicBboy)[![19375241](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19375241?v=4&w=50&h=50&mask=circle)](https://github.com/migueltol22)[![6065051](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6065051?v=4&w=50&h=50&mask=circle)](https://github.com/milton0825)[![70988](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/70988?v=4&w=50&h=50&mask=circle)](https://github.com/slai)[![94349093](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/94349093?v=4&w=50&h=50&mask=circle)](https://github.com/SmritiSatyanV)[![16090976](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16090976?v=4&w=50&h=50&mask=circle)](https://github.com/surindersinghp)[![43610471](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43610471?v=4&w=50&h=50&mask=circle)](https://github.com/TheYk98)[![53313394](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/53313394?v=4&w=50&h=50&mask=circle)](https://github.com/kosigz-lyft)[![4967458](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4967458?v=4&w=50&h=50&mask=circle)](https://github.com/chanadian)[![467927](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/467927?v=4&w=50&h=50&mask=circle)](https://github.com/kanterov)[![248688](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/248688?v=4&w=50&h=50&mask=circle)](https://github.com/hanzo)[![1330233](https://images.weserv.nl/?url=https://avatars.githu
busercontent.com/u/1330233?v=4&w=50&h=50&mask=circle)](https://github.com/igorvalko)[![31255434](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31255434?v=4&w=50&h=50&mask=circle)](https://github.com/kennyworkman)[![1472826](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1472826?v=4&w=50&h=50&mask=circle)](https://github.com/maximsmol)[![5026554](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5026554?v=4&w=50&h=50&mask=circle)](https://github.com/vsbus)[![34587798](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/34587798?v=4&w=50&h=50&mask=circle)](https://github.com/akhurana001)[![11799671](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11799671?v=4&w=50&h=50&mask=circle)](https://github.com/bstadlbauer)[![95110820](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/95110820?v=4&w=50&h=50&mask=circle)](https://github.com/jerempy)[![38207208](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/38207208?v=4&w=50&h=50&mask=circle)](https://github.com/tnsetting)[![8200209](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8200209?v=4&w=50&h=50&mask=circle)](https://github.com/catalinii)[![24364830](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24364830?v=4&w=50&h=50&mask=circle)](https://github.com/ByronHsu)[![43587819](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43587819?v=4&w=50&h=50&mask=circle)](https://github.com/chetcode)[![163899](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/163899?v=4&w=50&h=50&mask=circle)](https://github.com/regadas)[![36511035](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36511035?v=4&w=50&h=50&mask=circle)](https://github.com/fg91)[![22784654](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/22784654?v=4&w=50&h=50&mask=circle)](https://github.com/aybidi)[![1316881](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1316881?v=4&w=50&h=50&mask=circle)](https://github.com/akashkatipally)[![1777447](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1777447?v=4&w=50&h=50&mask=circle)](https://github.com/goyalankit)[![1360529](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1360529?v=4&w=50&h=50&mask=circle)](https://github.com/clairemcginty)[![104257](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/104257?v=4&w=50&h=50&mask=circle)](https://github.com/flixr)[![2538760](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2538760?v=4&w=50&h=50&mask=circle)](https://github.com/akumor)[![11970258](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11970258?v=4&w=50&h=50&mask=circle)](https://github.com/niliayu)[![19733683](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19733683?v=4&w=50&h=50&mask=circle)](https://github.com/snyk-bot)[![155087](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/155087?v=4&w=50&h=50&mask=circle)](https://github.com/derwiki)[![1399455](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1399455?v=4&w=50&h=50&mask=circle)](https://github.com/th0114nd)[![21109744](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/21109744?v=4&w=50&h=50&mask=circle)](https://github.com/AlekhyaSasi)[![49699333](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/in/
29110?v=4&w=50&h=50&mask=circle)](https://github.com/apps/dependabot)[![1810591](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1810591?v=4&w=50&h=50&mask=circle)](https://github.com/asottile)[![80421934](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/80421934?v=4&w=50&h=50&mask=circle)](https://github.com/SandraGH5)[![3939659](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3939659?v=4&w=50&h=50&mask=circle)](https://github.com/sbrunk)[![9609986](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/9609986?v=4&w=50&h=50&mask=circle)](https://github.com/sonjaer)[![12219405](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/12219405?v=4&w=50&h=50&mask=circle)](https://github.com/fediazgon)[![98349643](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/98349643?v=4&w=50&h=50&mask=circle)](https://github.com/rahul-theorem)[![16509490](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16509490?v=4&w=50&h=50&mask=circle)](https://github.com/ryankarlos)[![6774758](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6774758?v=4&w=50&h=50&mask=circle)](https://github.com/ddhirajkumar)[![18337807](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18337807?v=4&w=50&h=50&mask=circle)](https://github.com/max-hoffman)[![322624](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/322624?v=4&w=50&h=50&mask=circle)](https://github.com/AdrianoKF)[![1168692](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1168692?v=4&w=50&h=50&mask=circle)](https://github.com/dennisobrien)[![91385411](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/91385411?v=4&w=50&h=50&mask=circle)](https://github.com/Ln11211)[![30621230](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/30621230?v=4&w=50&h=50&mask=circle)](https://github.com/aeioulisa)[![54334265](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/54334265?v=4&w=50&h=50&mask=circle)](https://github.com/michaels-lyft)[![48736656](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/48736656?v=4&w=50&h=50&mask=circle)](https://github.com/murilommen)[![17165004](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17165004?v=4&w=50&h=50&mask=circle)](https://github.com/RobertoRRW)[![30375389](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/30375389?v=4&w=50&h=50&mask=circle)](https://github.com/bimtauer)[![97543480](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/97543480?v=4&w=50&h=50&mask=circle)](https://github.com/esadler-hbo)[![69013027](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/69013027?v=4&w=50&h=50&mask=circle)](https://github.com/ggydush-fn)[![116700206](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/116700206?v=4&w=50&h=50&mask=circle)](https://github.com/kiliangojek)[![422486](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/422486?v=4&w=50&h=50&mask=circle)](https://github.com/bethebunny)[![54333860](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/54333860?v=4&w=50&h=50&mask=circle)](https://github.com/aalavian)[![7005765](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7005765?v=4&w=50&h=50&mask=circle)](https://github.com/convexquad)[![4025771](https://images.weserv.nl/?url=https://avatars.git
hubusercontent.com/u/4025771?v=4&w=50&h=50&mask=circle)](https://github.com/andresgomezfrr)[![48966647](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/48966647?v=4&w=50&h=50&mask=circle)](https://github.com/asahalyft)[![77167782](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/77167782?v=4&w=50&h=50&mask=circle)](https://github.com/apatel-fn)[![23013825](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/23013825?v=4&w=50&h=50&mask=circle)](https://github.com/arpitbhardwaj)[![31381038](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31381038?v=4&w=50&h=50&mask=circle)](https://github.com/lordnodd)[![4396228](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4396228?v=4&w=50&h=50&mask=circle)](https://github.com/bryanwweber)[![6288302](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6288302?v=4&w=50&h=50&mask=circle)](https://github.com/CalvinLeather)[![23107192](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/23107192?v=4&w=50&h=50&mask=circle)](https://github.com/YmirKhang)[![121866694](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/121866694?v=4&w=50&h=50&mask=circle)](https://github.com/franco-bocci)[![7358951](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7358951?v=4&w=50&h=50&mask=circle)](https://github.com/frsann)[![33652917](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/33652917?v=4&w=50&h=50&mask=circle)](https://github.com/hfurkanvural)[![6984748](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6984748?v=4&w=50&h=50&mask=circle)](https://github.com/jbrambleDC)[![488594](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/488594?v=4&w=50&h=50&mask=circle)](https://github.com/jcugat)[![20173739](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/20173739?v=4&w=50&h=50&mask=circle)](https://github.com/madhur-tandon)[![34498039](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/34498039?v=4&w=50&h=50&mask=circle)](https://github.com/matheusMoreno)[![19853373](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19853373?v=4&w=50&h=50&mask=circle)](https://github.com/NotMatthewGriffin)[![10376195](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10376195?v=4&w=50&h=50&mask=circle)](https://github.com/myz540)[![125105](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/125105?v=4&w=50&h=50&mask=circle)](https://github.com/tekumara)[![1153481](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1153481?v=4&w=50&h=50&mask=circle)](https://github.com/ppiegaze)[![37170063](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/37170063?v=4&w=50&h=50&mask=circle)](https://github.com/Qiwen-Yu)[![2614101](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2614101?v=4&w=50&h=50&mask=circle)](https://github.com/RobinKa)[![4308533](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4308533?v=4&w=50&h=50&mask=circle)](https://github.com/rubenbarragan)[![10201242](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10201242?v=4&w=50&h=50&mask=circle)](https://github.com/sugatoray)[![11269256](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11269256?v=4&w=50&h=50&mask=circle)](https://github.com/sushrut111)[![61228633](https://images.weserv
.nl/?url=https://avatars.githubusercontent.com/u/61228633?v=4&w=50&h=50&mask=circle)](https://github.com/Tat-V)[![13070236](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13070236?v=4&w=50&h=50&mask=circle)](https://github.com/TeoZosa)[![8817639](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8817639?v=4&w=50&h=50&mask=circle)](https://github.com/ThomVett)[![17309187](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17309187?v=4&w=50&h=50&mask=circle)](https://github.com/datability-io)[![26834658](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26834658?v=4&w=50&h=50&mask=circle)](https://github.com/techytushar)[![5092599](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5092599?v=4&w=50&h=50&mask=circle)](https://github.com/vchowdhary)[![57967031](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/57967031?v=4&w=50&h=50&mask=circle)](https://github.com/varshaparthay)[![67166843](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/67166843?v=4&w=50&h=50&mask=circle)](https://github.com/vvasavada-fn)[![1778407](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1778407?v=4&w=50&h=50&mask=circle)](https://github.com/ybubnov)[![51814063](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/51814063?v=4&w=50&h=50&mask=circle)](https://github.com/Yicheng-Lu-llll)[![3741621](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3741621?v=4&w=50&h=50&mask=circle)](https://github.com/palchicz)[![12450632](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/12450632?v=4&w=50&h=50&mask=circle)](https://github.com/ajsalow)[![35151789](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/35151789?v=4&w=50&h=50&mask=circle)](https://github.com/ggydush)[![13331724](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13331724?v=4&w=50&h=50&mask=circle)](https://github.com/martinlyra)[![119345186](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/119345186?v=4&w=50&h=50&mask=circle)](https://github.com/mcloney-ddm)[![1521126](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1521126?v=4&w=50&h=50&mask=circle)](https://github.com/pbrogan12)[![73247359](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/73247359?v=4&w=50&h=50&mask=circle)](https://github.com/stef-stripe)[![50860453](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/50860453?v=4&w=50&h=50&mask=circle)](https://github.com/charlie0220)[![6506810](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6506810?v=4&w=50&h=50&mask=circle)](https://github.com/stephen37)[![55718143](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/55718143?v=4&w=50&h=50&mask=circle)](https://github.com/anrusina)[![65977800](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/65977800?v=4&w=50&h=50&mask=circle)](https://github.com/service-github-lyft-semantic-release)[![6610300](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6610300?v=4&w=50&h=50&mask=circle)](https://github.com/ursucarina)[![84735036](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/84735036?v=4&w=50&h=50&mask=circle)](https://github.com/jsonporter)[![85753828](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/85753828?v=4&w=50&h=50&mask=circle)](https://githu
b.com/csirius)[![101579322](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/101579322?v=4&w=50&h=50&mask=circle)](https://github.com/olga-union)[![26953709](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26953709?v=4&w=50&h=50&mask=circle)](https://github.com/Pianist038801)[![105876962](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/105876962?v=4&w=50&h=50&mask=circle)](https://github.com/james-union)[![25038146](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25038146?v=4&w=50&h=50&mask=circle)](https://github.com/eugenejahn)[![88684372](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/88684372?v=4&w=50&h=50&mask=circle)](https://github.com/4nalog)[![99441958](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/99441958?v=4&w=50&h=50&mask=circle)](https://github.com/apTalya)[![1388071](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1388071?v=4&w=50&h=50&mask=circle)](https://github.com/aviaviavi)[![58770001](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/58770001?v=4&w=50&h=50&mask=circle)](https://github.com/Professional0321)[![20668349](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/20668349?v=4&w=50&h=50&mask=circle)](https://github.com/HiromuHota)[![100569684](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/100569684?v=4&w=50&h=50&mask=circle)](https://github.com/rafaelraposospot)[![17351764](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17351764?v=4&w=50&h=50&mask=circle)](https://github.com/daniel-shuy)[![6399428](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6399428?v=4&w=50&h=50&mask=circle)](https://github.com/live-wire)[![25695302](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25695302?v=4&w=50&h=50&mask=circle)](https://github.com/sisco0)[![18363301](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18363301?v=4&w=50&h=50&mask=circle)](https://github.com/jimbobby5)[![4023015](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4023015?v=4&w=50&h=50&mask=circle)](https://github.com/pradithya)[![3451399](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3451399?v=4&w=50&h=50&mask=circle)](https://github.com/skiptomyliu)[![25364490](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25364490?v=4&w=50&h=50&mask=circle)](https://github.com/haoyuez)[![50679871](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/50679871?v=4&w=50&h=50&mask=circle)](https://github.com/lupasarin)[![7548823](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7548823?v=4&w=50&h=50&mask=circle)](https://github.com/Dread1982)[![7515359](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7515359?v=4&w=50&h=50&mask=circle)](https://github.com/narape)[![31982395](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31982395?v=4&w=50&h=50&mask=circle)](https://github.com/alexapdev)[![62209650](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/62209650?v=4&w=50&h=50&mask=circle)](https://github.com/3t8)[![1892175](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1892175?v=4&w=50&h=50&mask=circle)](https://github.com/zeryx)[![200401](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/200401?v=4&w=50&h=50&mask=circle)](ht
tps://github.com/arturdryomov)[![13770222](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13770222?v=4&w=50&h=50&mask=circle)](https://github.com/ChickenTarm)[![2380665](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2380665?v=4&w=50&h=50&mask=circle)](https://github.com/DavidMertz)[![24739949](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24739949?v=4&w=50&h=50&mask=circle)](https://github.com/felixwang9817)[![10430635](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10430635?v=4&w=50&h=50&mask=circle)](https://github.com/juandiegopalomino)[![31911175](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31911175?v=4&w=50&h=50&mask=circle)](https://github.com/kanyesthaker)[![104152793](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/104152793?v=4&w=50&h=50&mask=circle)](https://github.com/marc-union)[![27818609](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27818609?v=4&w=50&h=50&mask=circle)](https://github.com/michaeltinsley)[![6486584](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6486584?v=4&w=50&h=50&mask=circle)](https://github.com/mucahitkantepe)[![321459](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/321459?v=4&w=50&h=50&mask=circle)](https://github.com/oyevtushok)[![35962310](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/35962310?v=4&w=50&h=50&mask=circle)](https://github.com/trishitapingolia)[![91927689](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/91927689?v=4&w=50&h=50&mask=circle)](https://github.com/Smartmind12)[![726061](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/726061?v=4&w=50&h=50&mask=circle)](https://github.com/huxuan)[![47872044](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/47872044?v=4&w=50&h=50&mask=circle)](https://github.com/privatedumbo)[![105229971](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/105229971?v=4&w=50&h=50&mask=circle)](https://github.com/tjKairos)[![405480](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/405480?v=4&w=50&h=50&mask=circle)](https://github.com/georgesnelling)[![1004789](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1004789?v=4&w=50&h=50&mask=circle)](https://github.com/dschaller)[![82604841](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/82604841?v=4&w=50&h=50&mask=circle)](https://github.com/davidmirror-ops)[![480621](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/480621?v=4&w=50&h=50&mask=circle)](https://github.com/davidxia)[![1335881](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1335881?v=4&w=50&h=50&mask=circle)](https://github.com/hoyajigi)[![100597998](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/100597998?v=4&w=50&h=50&mask=circle)](https://github.com/MrKrishnaAgarwal)[![4830700](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4830700?v=4&w=50&h=50&mask=circle)](https://github.com/NitinAgg)[![69161722](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/69161722?v=4&w=50&h=50&mask=circle)](https://github.com/noobkid2411)[![43336767](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43336767?v=4&w=50&h=50&mask=circle)](https://github.com/yongchand)[![25391173](https://images.weserv.nl/?url=https://avatars.gith
ubusercontent.com/u/25391173?v=4&w=50&h=50&mask=circle)](https://github.com/nicklofaso)[![66388192](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/66388192?v=4&w=50&h=50&mask=circle)](https://github.com/mounesi)[![14992189](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/14992189?v=4&w=50&h=50&mask=circle)](https://github.com/eanakhl)[![1175392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1175392?v=4&w=50&h=50&mask=circle)](https://github.com/adinin)[![7475946](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7475946?v=4&w=50&h=50&mask=circle)](https://github.com/anton-malakhov)[![11796986](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11796986?v=4&w=50&h=50&mask=circle)](https://github.com/avan-sh)[![304786](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/304786?v=4&w=50&h=50&mask=circle)](https://github.com/kinow)[![24402505](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24402505?v=4&w=50&h=50&mask=circle)](https://github.com/Daeruin)[![1659415](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1659415?v=4&w=50&h=50&mask=circle)](https://github.com/dav009)[![86911142](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/86911142?v=4&w=50&h=50&mask=circle)](https://github.com/idivyanshbansal)[![11456773](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11456773?v=4&w=50&h=50&mask=circle)](https://github.com/fvde)[![7490199](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7490199?v=4&w=50&h=50&mask=circle)](https://github.com/Lundez)[![10345184](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10345184?v=4&w=50&h=50&mask=circle)](https://github.com/hasukmistry)[![29532638](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/29532638?v=4&w=50&h=50&mask=circle)](https://github.com/rokrokss)[![14008978](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/14008978?v=4&w=50&h=50&mask=circle)](https://github.com/jeremydonahue)[![9272376](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/9272376?v=4&w=50&h=50&mask=circle)](https://github.com/jonasdebeukelaer)[![1633460](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1633460?v=4&w=50&h=50&mask=circle)](https://github.com/jmcarp)[![3033592](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3033592?v=4&w=50&h=50&mask=circle)](https://github.com/kazesberger)[![19229049](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19229049?v=4&w=50&h=50&mask=circle)](https://github.com/lsena)[![36594527](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36594527?v=4&w=50&h=50&mask=circle)](https://github.com/mishmanners)[![8755869](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8755869?v=4&w=50&h=50&mask=circle)](https://github.com/paravatha)[![6528449](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6528449?v=4&w=50&h=50&mask=circle)](https://github.com/uschi2000)[![790725](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/790725?v=4&w=50&h=50&mask=circle)](https://github.com/rodrigobaron)[![576968](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/576968?v=4&w=50&h=50&mask=circle)](https://github.com/ronaldosaheki)[![36827492](https://images.weserv.nl/?url=https://avatars.githubuserc
ontent.com/u/36827492?v=4&w=50&h=50&mask=circle)](https://github.com/shahwar9)[![133936](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/133936?v=4&w=50&h=50&mask=circle)](https://github.com/shihgianlee)[![10438373](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10438373?v=4&w=50&h=50&mask=circle)](https://github.com/SKalt)[![33272587](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/33272587?v=4&w=50&h=50&mask=circle)](https://github.com/samuel-sujith)[![580328](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/580328?v=4&w=50&h=50&mask=circle)](https://github.com/ilikedata)[![1027207](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1027207?v=4&w=50&h=50&mask=circle)](https://github.com/orf)[![16526627](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16526627?v=4&w=50&h=50&mask=circle)](https://github.com/vijaysaravana)[![10526540](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10526540?v=4&w=50&h=50&mask=circle)](https://github.com/yubofredwang)[![5346764](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5346764?v=4&w=50&h=50&mask=circle)](https://github.com/fsz285)[![22917741](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/22917741?v=4&w=50&h=50&mask=circle)](https://github.com/gigi-at-zymergen)[![40143026](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/40143026?v=4&w=50&h=50&mask=circle)](https://github.com/hampusrosvall)[![77197126](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/77197126?v=4&w=50&h=50&mask=circle)](https://github.com/hitarth01)[![300315](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/300315?v=4&w=50&h=50&mask=circle)](https://github.com/jcourteau)[![106815366](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/106815366?v=4&w=50&h=50&mask=circle)](https://github.com/jw0515)[![1568889](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1568889?v=4&w=50&h=50&mask=circle)](https://github.com/leorleor)[![937967](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/937967?v=4&w=50&h=50&mask=circle)](https://github.com/moose007)[![114232404](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/114232404?v=4&w=50&h=50&mask=circle)](https://github.com/sanjaychouhan-adf)[![14996868](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/14996868?v=4&w=50&h=50&mask=circle)](https://github.com/v01dXYZ)[![93438190](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/93438190?v=4&w=50&h=50&mask=circle)](https://github.com/wanderer163)[![1043051](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1043051?v=4&w=50&h=50&mask=circle)](https://github.com/kylewaynebenson)[![21953442](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/21953442?v=4&w=50&h=50&mask=circle)](https://github.com/Gui11aum3)[![16461847](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16461847?v=4&w=50&h=50&mask=circle)](https://github.com/JakeNeyer)[![64676594](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/64676594?v=4&w=50&h=50&mask=circle)](https://github.com/abhijeet007rocks8)[![1174730](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1174730?v=4&w=50&h=50&mask=circle)](https://github.com/mouuff)[![20135478](https://images.weserv.nl/?url=http
s://avatars.githubusercontent.com/u/20135478?v=4&w=50&h=50&mask=circle)](https://github.com/Juneezee)[![151841](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/151841?v=4&w=50&h=50&mask=circle)](https://github.com/goodgravy)[![44368997](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/44368997?v=4&w=50&h=50&mask=circle)](https://github.com/radiantly)[![36989112](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36989112?v=4&w=50&h=50&mask=circle)](https://github.com/nishantwrp)[![7144772](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7144772?v=4&w=50&h=50&mask=circle)](https://github.com/sighingnow)[![697033](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/697033?v=4&w=50&h=50&mask=circle)](https://github.com/vglocus)[![2845540](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2845540?v=4&w=50&h=50&mask=circle)](https://github.com/RustedBones)[![4056828](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4056828?v=4&w=50&h=50&mask=circle)](https://github.com/pablocasares)[![1071153](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1071153?v=4&w=50&h=50&mask=circle)](https://github.com/evdokim)[![5732047](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5732047?v=4&w=50&h=50&mask=circle)](https://github.com/stormy-ua)[![471021](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/471021?v=4&w=50&h=50&mask=circle)](https://github.com/marschall)[![71284190](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/71284190?v=4&w=50&h=50&mask=circle)](https://github.com/gdungca-fn)[![26265392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26265392?v=4&w=50&h=50&mask=circle)](https://github.com/ttanay)[![85021780](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/85021780?v=4&w=50&h=50&mask=circle)](https://github.com/Abdullahi-Ahmed)[![48512530](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/48512530?v=4&w=50&h=50&mask=circle)](https://github.com/amaleelhamri)[![3275593](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3275593?v=4&w=50&h=50&mask=circle)](https://github.com/pradyunsg)[![66853113](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/in/68672?v=4&w=50&h=50&mask=circle)](https://github.com/apps/pre-commit-ci)[![1834509](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1834509?v=4&w=50&h=50&mask=circle)](https://github.com/jdknight)[![107893](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/107893?v=4&w=50&h=50&mask=circle)](https://github.com/kmike)[![1324225](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1324225?v=4&w=50&h=50&mask=circle)](https://github.com/hugovk)[![1300022](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1300022?v=4&w=50&h=50&mask=circle)](https://github.com/sirosen)[![244656](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/244656?v=4&w=50&h=50&mask=circle)](https://github.com/humitos)[![467294](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/467294?v=4&w=50&h=50&mask=circle)](https://github.com/bastimeyer)[![71486](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/71486?v=4&w=50&h=50&mask=circle)](https://github.com/asmeurer)[![20280470](https://images.weserv.nl/?url=https://avatars.githubuserc
ontent.com/u/20280470?v=4&w=50&h=50&mask=circle)](https://github.com/drewyh)[![3533182](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3533182?v=4&w=50&h=50&mask=circle)](https://github.com/polyzen)[![199429](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/199429?v=4&w=50&h=50&mask=circle)](https://github.com/dvarrazzo)[![1032633](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1032633?v=4&w=50&h=50&mask=circle)](https://github.com/dbitouze)[![1313087](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1313087?v=4&w=50&h=50&mask=circle)](https://github.com/idryzhov)[![521097](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/521097?v=4&w=50&h=50&mask=circle)](https://github.com/pauloxnet)[![63936253](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/63936253?v=4&w=50&h=50&mask=circle)](https://github.com/ichard26)[![18519037](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18519037?v=4&w=50&h=50&mask=circle)](https://github.com/sethmlarson)[![413772](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/413772?v=4&w=50&h=50&mask=circle)](https://github.com/graingert)[![11478411](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11478411?v=4&w=50&h=50&mask=circle)](https://github.com/stonecharioteer)[![6739793](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6739793?v=4&w=50&h=50&mask=circle)](https://github.com/yeraydiazdiaz)[![83365562](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/83365562?v=4&w=50&h=50&mask=circle)](https://github.com/eviau-sat)[![6670894](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6670894?v=4&w=50&h=50&mask=circle)](https://github.com/rozsasarpi)[![86675](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/86675?v=4&w=50&h=50&mask=circle)](https://github.com/estan)[![4748863](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4748863?v=4&w=50&h=50&mask=circle)](https://github.com/pseudomuto)[![181308](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/181308?v=4&w=50&h=50&mask=circle)](https://github.com/htdvisser)[![1390277](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1390277?v=4&w=50&h=50&mask=circle)](https://github.com/jacobtolar)[![1391982](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1391982?v=4&w=50&h=50&mask=circle)](https://github.com/ezimanyi)[![3880001](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3880001?v=4&w=50&h=50&mask=circle)](https://github.com/lpabon)[![770392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/770392?v=4&w=50&h=50&mask=circle)](https://github.com/ArcEye)[![6178510](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6178510?v=4&w=50&h=50&mask=circle)](https://github.com/mingrammer)[![5111931](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5111931?v=4&w=50&h=50&mask=circle)](https://github.com/aschrijver)[![873434](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/873434?v=4&w=50&h=50&mask=circle)](https://github.com/panzerfahrer)[![16724](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16724?v=4&w=50&h=50&mask=circle)](https://github.com/glasser)[![17330872](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17330872?v=4&w=50&h=50&ma
sk=circle)](https://github.com/murph0)[![419419](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/419419?v=4&w=50&h=50&mask=circle)](https://github.com/zetaron)[![1014](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1014?v=4&w=50&h=50&mask=circle)](https://github.com/sunfmin)[![504507](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/504507?v=4&w=50&h=50&mask=circle)](https://github.com/guozheng)[![8841470](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8841470?v=4&w=50&h=50&mask=circle)](https://github.com/suusan2go)[![901479](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/901479?v=4&w=50&h=50&mask=circle)](https://github.com/mhaberler)[![6400253](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6400253?v=4&w=50&h=50&mask=circle)](https://github.com/s4ichi)[![353644](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/353644?v=4&w=50&h=50&mask=circle)](https://github.com/dreampuf)[![12421077](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/12421077?v=4&w=50&h=50&mask=circle)](https://github.com/UnicodingUnicorn)[![809865](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/809865?v=4&w=50&h=50&mask=circle)](https://github.com/philiptzou)[![19378](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19378?v=4&w=50&h=50&mask=circle)](https://github.com/timabell)[![1113245](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1113245?v=4&w=50&h=50&mask=circle)](https://github.com/jasonhancock)[![101659](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/101659?v=4&w=50&h=50&mask=circle)](https://github.com/matryer)[![4730508](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4730508?v=4&w=50&h=50&mask=circle)](https://github.com/piotrrojek)[![33036160](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/33036160?v=4&w=50&h=50&mask=circle)](https://github.com/jasonsattler)[![470810](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/470810?v=4&w=50&h=50&mask=circle)](https://github.com/sbward)[![7592392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7592392?v=4&w=50&h=50&mask=circle)](https://github.com/Pisush)[![94814](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/94814?v=4&w=50&h=50&mask=circle)](https://github.com/tamalsaha)[![8147854](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8147854?v=4&w=50&h=50&mask=circle)](https://github.com/marianina8)[![1005](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1005?v=4&w=50&h=50&mask=circle)](https://github.com/ernesto-jimenez)[![17263167](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17263167?v=4&w=50&h=50&mask=circle)](https://github.com/jsteenb2)[![2807589](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2807589?v=4&w=50&h=50&mask=circle)](https://github.com/darwayne)[![1683714](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1683714?v=4&w=50&h=50&mask=circle)](https://github.com/naysayer)[![6386887](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6386887?v=4&w=50&h=50&mask=circle)](https://github.com/AgrimPrasad)[![615811](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/615811?v=4&w=50&h=50&mask=circle)](https://github.com/dahernan)[![75184](https:/
/images.weserv.nl/?url=https://avatars.githubusercontent.com/u/75184?v=4&w=50&h=50&mask=circle)](https://github.com/jtarchie)[![469669](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/469669?v=4&w=50&h=50&mask=circle)](https://github.com/jdtobe)[![28523](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/28523?v=4&w=50&h=50&mask=circle)](https://github.com/alrs)[![10113228](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10113228?v=4&w=50&h=50&mask=circle)](https://github.com/urisimchoni)[![5751464](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5751464?v=4&w=50&h=50&mask=circle)](https://github.com/Xercoy)[![2405410](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2405410?v=4&w=50&h=50&mask=circle)](https://github.com/marbergq)[![5082160](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5082160?v=4&w=50&h=50&mask=circle)](https://github.com/anothrNick)[![11335612](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11335612?v=4&w=50&h=50&mask=circle)](https://github.com/fermoya)[![23391642](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/23391642?v=4&w=50&h=50&mask=circle)](https://github.com/sbe-arg)[![1024762](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1024762?v=4&w=50&h=50&mask=circle)](https://github.com/PeerXu)[![7390781](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7390781?v=4&w=50&h=50&mask=circle)](https://github.com/reececomo)[![49680](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/49680?v=4&w=50&h=50&mask=circle)](https://github.com/dmerrick)[![87524](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/87524?v=4&w=50&h=50&mask=circle)](https://github.com/andrewcole)[![866505](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/866505?v=4&w=50&h=50&mask=circle)](https://github.com/phish108)[![2611549](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2611549?v=4&w=50&h=50&mask=circle)](https://github.com/endrjuskr)[![49961058](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/49961058?v=4&w=50&h=50&mask=circle)](https://github.com/bevans-HD)[![5655837](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5655837?v=4&w=50&h=50&mask=circle)](https://github.com/gukoff)[![8320753](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8320753?v=4&w=50&h=50&mask=circle)](https://github.com/lovromazgon)[![16513382](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16513382?v=4&w=50&h=50&mask=circle)](https://github.com/117)[![3807434](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3807434?v=4&w=50&h=50&mask=circle)](https://github.com/tomsolem)[![118945041](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/118945041?v=4&w=50&h=50&mask=circle)](https://github.com/vq-ambiata)[![8232503](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8232503?v=4&w=50&h=50&mask=circle)](https://github.com/sjauld)[![69170839](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/69170839?v=4&w=50&h=50&mask=circle)](https://github.com/adam-berrio)[![6388483](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6388483?v=4&w=50&h=50&mask=circle)](https://github.com/zsedem)[![8296645](https://images.weserv.nl/?url=https://avatars.githubusercontent.c
om/u/8296645?v=4&w=50&h=50&mask=circle)](https://github.com/imdanielsp)[![17337515](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17337515?v=4&w=50&h=50&mask=circle)](https://github.com/fabricepipart)[![10090384](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10090384?v=4&w=50&h=50&mask=circle)](https://github.com/ivanpk)[![2302957](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2302957?v=4&w=50&h=50&mask=circle)](https://github.com/JeremyLWright)[![995707](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/995707?v=4&w=50&h=50&mask=circle)](https://github.com/OskarStark)[![25486791](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25486791?v=4&w=50&h=50&mask=circle)](https://github.com/pavyarov)[![5067549](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5067549?v=4&w=50&h=50&mask=circle)](https://github.com/pellared)[![53085803](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/53085803?v=4&w=50&h=50&mask=circle)](https://github.com/cuttingedge1109)[![62775347](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/62775347?v=4&w=50&h=50&mask=circle)](https://github.com/okozachenko1203)[![25625597](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25625597?v=4&w=50&h=50&mask=circle)](https://github.com/zero-below)[![282792](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/282792?v=4&w=50&h=50&mask=circle)](https://github.com/asford)[![38894122](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/38894122?v=4&w=50&h=50&mask=circle)](https://github.com/bmcconeghy)[![16698198](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16698198?v=4&w=50&h=50&mask=circle)](https://github.com/conda-forge-admin)[![36490558](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36490558?v=4&w=50&h=50&mask=circle)](https://github.com/regro-cf-autotick-bot)[![79913779](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/in/102928?v=4&w=50&h=50&mask=circle)](https://github.com/apps/conda-forge-curator)[![41898282](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/in/15368?v=4&w=50&h=50&mask=circle)](https://github.com/apps/github-actions)[![18567580](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18567580?v=4&w=50&h=50&mask=circle)](https://github.com/conda-forge-linter)[![26092524](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26092524?v=4&w=50&h=50&mask=circle)](https://github.com/fellhorn) - 
+[![953358](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/953358?v=4&w=50&h=50&mask=circle)](https://github.com/katrogan)[![37090125](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/37090125?v=4&w=50&h=50&mask=circle)](https://github.com/lyft-metaservice-3)[![7597118](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7597118?v=4&w=50&h=50&mask=circle)](https://github.com/matthewphsmith)[![27159](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27159?v=4&w=50&h=50&mask=circle)](https://github.com/EngHabu)[![29843943](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/29843943?v=4&w=50&h=50&mask=circle)](https://github.com/goreleaserbot)[![8888115](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8888115?v=4&w=50&h=50&mask=circle)](https://github.com/hamersaw)[![10830562](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10830562?v=4&w=50&h=50&mask=circle)](https://github.com/yindia)[![78108056](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/78108056?v=4&w=50&h=50&mask=circle)](https://github.com/flyte-bot)[![158892](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/158892?v=4&w=50&h=50&mask=circle)](https://github.com/honnix)[![18408237](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18408237?v=4&w=50&h=50&mask=circle)](https://github.com/anandswaminathan)[![2896568](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2896568?v=4&w=50&h=50&mask=circle)](https://github.com/wild-endeavor)[![37936015](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/37936015?v=4&w=50&h=50&mask=circle)](https://github.com/pingsutw)[![653394](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/653394?v=4&w=50&h=50&mask=circle)](https://github.com/eapolinario)[![1518524](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1518524?v=4&w=50&h=50&mask=circle)](https://github.com/bnsblue)[![27724763](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27724763?v=4&w=50&h=50&mask=circle)](https://github.com/iaroslav-ciupin)[![16888709](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16888709?v=4&w=50&h=50&mask=circle)](https://github.com/kumare3)[![27777173](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27777173?v=4&w=50&h=50&mask=circle)](https://github.com/samhita-alla)[![23062603](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/23062603?v=4&w=50&h=50&mask=circle)](https://github.com/Antaxify)[![77798312](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/77798312?v=4&w=50&h=50&mask=circle)](https://github.com/pmahindrakar-oss)[![5032356](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5032356?v=4&w=50&h=50&mask=circle)](https://github.com/brucearctor)[![8805803](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8805803?v=4&w=50&h=50&mask=circle)](https://github.com/alexlipa91)[![6239450](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6239450?v=4&w=50&h=50&mask=circle)](https://github.com/mayitbeegh)[![452166](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/452166?v=4&w=50&h=50&mask=circle)](https://github.com/MorpheusXAUT)[![15335863](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/15335863?v=4&w=50&h=50&mask=circle)](https
://github.com/gvashishtha)[![6562898](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6562898?v=4&w=50&h=50&mask=circle)](https://github.com/ckiosidis)[![4748985](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4748985?v=4&w=50&h=50&mask=circle)](https://github.com/aliabbasjaffri)[![76461262](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/76461262?v=4&w=50&h=50&mask=circle)](https://github.com/Future-Outlier)[![5725707](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5725707?v=4&w=50&h=50&mask=circle)](https://github.com/andrewwdye)[![8122852](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8122852?v=4&w=50&h=50&mask=circle)](https://github.com/ariefrahmansyah)[![10869815](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10869815?v=4&w=50&h=50&mask=circle)](https://github.com/jeevb)[![3880645](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3880645?v=4&w=50&h=50&mask=circle)](https://github.com/jonathanburns)[![3936213](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3936213?v=4&w=50&h=50&mask=circle)](https://github.com/lu4nm3)[![26174213](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26174213?v=4&w=50&h=50&mask=circle)](https://github.com/lyft-metaservice-2)[![126913098](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/126913098?v=4&w=50&h=50&mask=circle)](https://github.com/squiishyy)[![46989299](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/46989299?v=4&w=50&h=50&mask=circle)](https://github.com/supreeth7)[![1815175](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1815175?v=4&w=50&h=50&mask=circle)](https://github.com/schottra)[![37558497](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/37558497?v=4&w=50&h=50&mask=circle)](https://github.com/pvditt)[![5487021](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5487021?v=4&w=50&h=50&mask=circle)](https://github.com/veggiemonk)[![9142716](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/9142716?v=4&w=50&h=50&mask=circle)](https://github.com/2uasimojo)[![2816689](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2816689?v=4&w=50&h=50&mask=circle)](https://github.com/cosmicBboy)[![19375241](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19375241?v=4&w=50&h=50&mask=circle)](https://github.com/migueltol22)[![24364830](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24364830?v=4&w=50&h=50&mask=circle)](https://github.com/ByronHsu)[![53313394](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/53313394?v=4&w=50&h=50&mask=circle)](https://github.com/kosigz-lyft)[![43610471](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43610471?v=4&w=50&h=50&mask=circle)](https://github.com/yk-x-25)[![10526540](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10526540?v=4&w=50&h=50&mask=circle)](https://github.com/yubofredwang)[![16090976](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16090976?v=4&w=50&h=50&mask=circle)](https://github.com/surindersinghp)[![94349093](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/94349093?v=4&w=50&h=50&mask=circle)](https://github.com/SmritiSatyanV)[![70988](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/70988?
v=4&w=50&h=50&mask=circle)](https://github.com/slai)[![6065051](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6065051?v=4&w=50&h=50&mask=circle)](https://github.com/milton0825)[![38207208](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/38207208?v=4&w=50&h=50&mask=circle)](https://github.com/tnsetting)[![95110820](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/95110820?v=4&w=50&h=50&mask=circle)](https://github.com/jerempy)[![11799671](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11799671?v=4&w=50&h=50&mask=circle)](https://github.com/bstadlbauer)[![34587798](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/34587798?v=4&w=50&h=50&mask=circle)](https://github.com/akhurana001)[![5026554](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5026554?v=4&w=50&h=50&mask=circle)](https://github.com/vsbus)[![1472826](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1472826?v=4&w=50&h=50&mask=circle)](https://github.com/maximsmol)[![31255434](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31255434?v=4&w=50&h=50&mask=circle)](https://github.com/kennyworkman)[![1330233](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1330233?v=4&w=50&h=50&mask=circle)](https://github.com/igorvalko)[![248688](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/248688?v=4&w=50&h=50&mask=circle)](https://github.com/hanzo)[![467927](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/467927?v=4&w=50&h=50&mask=circle)](https://github.com/kanterov)[![36511035](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36511035?v=4&w=50&h=50&mask=circle)](https://github.com/fg91)[![4967458](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4967458?v=4&w=50&h=50&mask=circle)](https://github.com/chanadian)[![8200209](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8200209?v=4&w=50&h=50&mask=circle)](https://github.com/catalinii)[![43587819](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43587819?v=4&w=50&h=50&mask=circle)](https://github.com/chetcode)[![163899](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/163899?v=4&w=50&h=50&mask=circle)](https://github.com/regadas)[![54248170](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/54248170?v=4&w=50&h=50&mask=circle)](https://github.com/nicholasjng)[![2538760](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2538760?v=4&w=50&h=50&mask=circle)](https://github.com/akumor)[![104257](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/104257?v=4&w=50&h=50&mask=circle)](https://github.com/flixr)[![92917168](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/92917168?v=4&w=50&h=50&mask=circle)](https://github.com/edwinyyyu)[![1360529](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1360529?v=4&w=50&h=50&mask=circle)](https://github.com/clairemcginty)[![1777447](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1777447?v=4&w=50&h=50&mask=circle)](https://github.com/goyalankit)[![1316881](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1316881?v=4&w=50&h=50&mask=circle)](https://github.com/akashkatipally)[![22784654](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/22784654?v=4&w=50&h=50&mask=circle)
](https://github.com/aybidi)[![5402633](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5402633?v=4&w=50&h=50&mask=circle)](https://github.com/thomasjpfan)[![49699333](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/in/29110?v=4&w=50&h=50&mask=circle)](https://github.com/apps/dependabot)[![72752478](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/72752478?v=4&w=50&h=50&mask=circle)](https://github.com/Mecoli1219)[![19733683](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19733683?v=4&w=50&h=50&mask=circle)](https://github.com/snyk-bot)[![114708546](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/114708546?v=4&w=50&h=50&mask=circle)](https://github.com/troychiu)[![35886692](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/35886692?v=4&w=50&h=50&mask=circle)](https://github.com/austin362667)[![47914085](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/47914085?v=4&w=50&h=50&mask=circle)](https://github.com/MortalHappiness)[![9131935](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/9131935?v=4&w=50&h=50&mask=circle)](https://github.com/Tom-Newton)[![155087](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/155087?v=4&w=50&h=50&mask=circle)](https://github.com/derwiki)[![40698988](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/40698988?v=4&w=50&h=50&mask=circle)](https://github.com/dansola)[![14800485](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/14800485?v=4&w=50&h=50&mask=circle)](https://github.com/jasonlai1218)[![62143443](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/62143443?v=4&w=50&h=50&mask=circle)](https://github.com/mao3267)[![31577879](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31577879?v=4&w=50&h=50&mask=circle)](https://github.com/pryce-turner)[![1399455](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1399455?v=4&w=50&h=50&mask=circle)](https://github.com/th0114nd)[![58504997](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/58504997?v=4&w=50&h=50&mask=circle)](https://github.com/novahow)[![46030368](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/46030368?v=4&w=50&h=50&mask=circle)](https://github.com/ChungYujoyce)[![21109744](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/21109744?v=4&w=50&h=50&mask=circle)](https://github.com/AlekhyaSasi)[![1810591](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1810591?v=4&w=50&h=50&mask=circle)](https://github.com/asottile)[![54340816](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/54340816?v=4&w=50&h=50&mask=circle)](https://github.com/granthamtaylor)[![89976021](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/89976021?v=4&w=50&h=50&mask=circle)](https://github.com/fiedlerNr9)[![51814063](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/51814063?v=4&w=50&h=50&mask=circle)](https://github.com/Yicheng-Lu-llll)[![9609986](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/9609986?v=4&w=50&h=50&mask=circle)](https://github.com/sonjaer)[![1153481](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1153481?v=4&w=50&h=50&mask=circle)](https://github.com/ppiegaze)[![35151789](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/351517
89?v=4&w=50&h=50&mask=circle)](https://github.com/ggydush)[![140021987](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/140021987?v=4&w=50&h=50&mask=circle)](https://github.com/ddl-rliu)[![138256885](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/138256885?v=4&w=50&h=50&mask=circle)](https://github.com/ysysys3074)[![3939659](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3939659?v=4&w=50&h=50&mask=circle)](https://github.com/sbrunk)[![80421934](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/80421934?v=4&w=50&h=50&mask=circle)](https://github.com/SandraGH5)[![52046377](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/52046377?v=4&w=50&h=50&mask=circle)](https://github.com/hhcs9527)[![4406268](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4406268?v=4&w=50&h=50&mask=circle)](https://github.com/otarabai)[![16709018](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16709018?v=4&w=50&h=50&mask=circle)](https://github.com/noahjax)[![417209](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/417209?v=4&w=50&h=50&mask=circle)](https://github.com/neverett)[![27844407](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27844407?v=4&w=50&h=50&mask=circle)](https://github.com/ringohoffman)[![106939297](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/106939297?v=4&w=50&h=50&mask=circle)](https://github.com/chaohengstudent)[![380854](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/380854?v=4&w=50&h=50&mask=circle)](https://github.com/bgedik)[![18337807](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18337807?v=4&w=50&h=50&mask=circle)](https://github.com/max-hoffman)[![1276867](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1276867?v=4&w=50&h=50&mask=circle)](https://github.com/JackUrb)[![115421902](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/115421902?v=4&w=50&h=50&mask=circle)](https://github.com/wayner0628)[![36886416](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36886416?v=4&w=50&h=50&mask=circle)](https://github.com/JiangJiaWei1103)[![134093844](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/134093844?v=4&w=50&h=50&mask=circle)](https://github.com/rdeaton-freenome)[![106936600](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/106936600?v=4&w=50&h=50&mask=circle)](https://github.com/peridotml)[![26268253](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26268253?v=4&w=50&h=50&mask=circle)](https://github.com/arbaobao)[![16509490](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16509490?v=4&w=50&h=50&mask=circle)](https://github.com/ryankarlos)[![98242479](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/98242479?v=4&w=50&h=50&mask=circle)](https://github.com/RichhLi)[![98349643](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/98349643?v=4&w=50&h=50&mask=circle)](https://github.com/rahul-theorem)[![12219405](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/12219405?v=4&w=50&h=50&mask=circle)](https://github.com/fediazgon)[![322624](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/322624?v=4&w=50&h=50&mask=circle)](https://github.com/AdrianoKF)[![953385](https://images.weserv.nl/?url=https://avatars.githu
busercontent.com/u/953385?v=4&w=50&h=50&mask=circle)](https://github.com/blaketastic2)[![30375389](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/30375389?v=4&w=50&h=50&mask=circle)](https://github.com/bimtauer)[![92072956](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/92072956?v=4&w=50&h=50&mask=circle)](https://github.com/PudgyPigeon)[![97543480](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/97543480?v=4&w=50&h=50&mask=circle)](https://github.com/esadler-hbo)[![69013027](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/69013027?v=4&w=50&h=50&mask=circle)](https://github.com/ggydush-fn)[![116700206](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/116700206?v=4&w=50&h=50&mask=circle)](https://github.com/kiliangojek)[![1521126](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1521126?v=4&w=50&h=50&mask=circle)](https://github.com/pbrogan12)[![120470035](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/120470035?v=4&w=50&h=50&mask=circle)](https://github.com/redartera)[![4025771](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4025771?v=4&w=50&h=50&mask=circle)](https://github.com/andresgomezfrr)[![422486](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/422486?v=4&w=50&h=50&mask=circle)](https://github.com/bethebunny)[![26092524](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26092524?v=4&w=50&h=50&mask=circle)](https://github.com/fellhorn)[![1168692](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1168692?v=4&w=50&h=50&mask=circle)](https://github.com/dennisobrien)[![33652917](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/33652917?v=4&w=50&h=50&mask=circle)](https://github.com/hfurkanvural)[![45017130](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/45017130?v=4&w=50&h=50&mask=circle)](https://github.com/helenzhangyc)[![1659910](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1659910?v=4&w=50&h=50&mask=circle)](https://github.com/oliverhu)[![91385411](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/91385411?v=4&w=50&h=50&mask=circle)](https://github.com/Ln11211)[![30621230](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/30621230?v=4&w=50&h=50&mask=circle)](https://github.com/aeioulisa)[![54334265](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/54334265?v=4&w=50&h=50&mask=circle)](https://github.com/michaels-lyft)[![48736656](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/48736656?v=4&w=50&h=50&mask=circle)](https://github.com/murilommen)[![150836163](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/150836163?v=4&w=50&h=50&mask=circle)](https://github.com/neilisaur)[![17165004](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17165004?v=4&w=50&h=50&mask=circle)](https://github.com/RobertoRRW)[![81233629](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/81233629?v=4&w=50&h=50&mask=circle)](https://github.com/101rakibulhasan)[![38955457](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/38955457?v=4&w=50&h=50&mask=circle)](https://github.com/RRK1000)[![2614101](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2614101?v=4&w=50&h=50&mask=circle)](https://github.com/RobinKa)[![4308533](https://image
s.weserv.nl/?url=https://avatars.githubusercontent.com/u/4308533?v=4&w=50&h=50&mask=circle)](https://github.com/rubenbarragan)[![10201242](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10201242?v=4&w=50&h=50&mask=circle)](https://github.com/sugatoray)[![11269256](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11269256?v=4&w=50&h=50&mask=circle)](https://github.com/sushrut111)[![61228633](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/61228633?v=4&w=50&h=50&mask=circle)](https://github.com/Tat-V)[![13070236](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13070236?v=4&w=50&h=50&mask=circle)](https://github.com/TeoZosa)[![8817639](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8817639?v=4&w=50&h=50&mask=circle)](https://github.com/ThomVett)[![17309187](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17309187?v=4&w=50&h=50&mask=circle)](https://github.com/datability-io)[![2640499](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2640499?v=4&w=50&h=50&mask=circle)](https://github.com/wirthual)[![97332401](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/97332401?v=4&w=50&h=50&mask=circle)](https://github.com/RaghavMangla)[![100569684](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/100569684?v=4&w=50&h=50&mask=circle)](https://github.com/RRap0so)[![147648834](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/147648834?v=4&w=50&h=50&mask=circle)](https://github.com/quinten-flwls)[![37170063](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/37170063?v=4&w=50&h=50&mask=circle)](https://github.com/Qiwen-Yu)[![43886578](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43886578?v=4&w=50&h=50&mask=circle)](https://github.com/400Ping)[![125105](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/125105?v=4&w=50&h=50&mask=circle)](https://github.com/tekumara)[![37547264](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/37547264?v=4&w=50&h=50&mask=circle)](https://github.com/Nan2018)[![49385643](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/49385643?v=4&w=50&h=50&mask=circle)](https://github.com/MinuraPunchihewa)[![10376195](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10376195?v=4&w=50&h=50&mask=circle)](https://github.com/myz540)[![4417105](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4417105?v=4&w=50&h=50&mask=circle)](https://github.com/Terryhung)[![73247359](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/73247359?v=4&w=50&h=50&mask=circle)](https://github.com/stef-stripe)[![12913704](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/12913704?v=4&w=50&h=50&mask=circle)](https://github.com/mg515)[![119345186](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/119345186?v=4&w=50&h=50&mask=circle)](https://github.com/mcloney-ddm)[![13331724](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13331724?v=4&w=50&h=50&mask=circle)](https://github.com/martinlyra)[![24611279](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24611279?v=4&w=50&h=50&mask=circle)](https://github.com/ericwudayi)[![6333870](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6333870?v=4&w=50&h=50&mask=circle)](https://github.com/demmerichs)[![40
23015](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4023015?v=4&w=50&h=50&mask=circle)](https://github.com/pradithya)[![12450632](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/12450632?v=4&w=50&h=50&mask=circle)](https://github.com/ajsalow)[![3741621](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3741621?v=4&w=50&h=50&mask=circle)](https://github.com/palchicz)[![43726198](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43726198?v=4&w=50&h=50&mask=circle)](https://github.com/yundai424)[![131146298](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/131146298?v=4&w=50&h=50&mask=circle)](https://github.com/yini7777)[![29053051](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/29053051?v=4&w=50&h=50&mask=circle)](https://github.com/XinEDprob)[![52355146](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/52355146?v=4&w=50&h=50&mask=circle)](https://github.com/lowc1012)[![40901950](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/40901950?v=4&w=50&h=50&mask=circle)](https://github.com/WebOfNakedFancies)[![67166843](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/67166843?v=4&w=50&h=50&mask=circle)](https://github.com/vvasavada-fn)[![15071835](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/15071835?v=4&w=50&h=50&mask=circle)](https://github.com/va6996)[![3391550](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3391550?v=4&w=50&h=50&mask=circle)](https://github.com/devictr)[![57967031](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/57967031?v=4&w=50&h=50&mask=circle)](https://github.com/varshaparthay)[![5092599](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5092599?v=4&w=50&h=50&mask=circle)](https://github.com/vchowdhary)[![26834658](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26834658?v=4&w=50&h=50&mask=circle)](https://github.com/techytushar)[![14007150](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/14007150?v=4&w=50&h=50&mask=circle)](https://github.com/deepyaman)[![2380665](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2380665?v=4&w=50&h=50&mask=circle)](https://github.com/DavidMertz)[![16297104](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16297104?v=4&w=50&h=50&mask=circle)](https://github.com/danpf)[![10463690](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10463690?v=4&w=50&h=50&mask=circle)](https://github.com/cjidboon94)[![26920893](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26920893?v=4&w=50&h=50&mask=circle)](https://github.com/chinghongfang)[![27000005](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27000005?v=4&w=50&h=50&mask=circle)](https://github.com/supercharleszhu)[![420942](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/420942?v=4&w=50&h=50&mask=circle)](https://github.com/cameronraysmith)[![6288302](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6288302?v=4&w=50&h=50&mask=circle)](https://github.com/CalvinLeather)[![179035736](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/179035736?v=4&w=50&h=50&mask=circle)](https://github.com/bryan-hunted)[![4396228](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4396228?v=4&w=50&h=50&mask=circle)
](https://github.com/bryanwweber)[![7422223](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7422223?v=4&w=50&h=50&mask=circle)](https://github.com/bcvanmeurs)[![234145](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/234145?v=4&w=50&h=50&mask=circle)](https://github.com/benoistlaurent)[![31381038](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31381038?v=4&w=50&h=50&mask=circle)](https://github.com/lordnodd)[![49250723](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/49250723?v=4&w=50&h=50&mask=circle)](https://github.com/ArthurBook)[![58334441](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/58334441?v=4&w=50&h=50&mask=circle)](https://github.com/wckdman)[![23013825](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/23013825?v=4&w=50&h=50&mask=circle)](https://github.com/arpitbhardwaj)[![77167782](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/77167782?v=4&w=50&h=50&mask=circle)](https://github.com/apatel-fn)[![48966647](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/48966647?v=4&w=50&h=50&mask=circle)](https://github.com/asahalyft)[![7005765](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7005765?v=4&w=50&h=50&mask=circle)](https://github.com/convexquad)[![54333860](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/54333860?v=4&w=50&h=50&mask=circle)](https://github.com/aalavian)[![110886184](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/110886184?v=4&w=50&h=50&mask=circle)](https://github.com/aditya7302)[![19853373](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19853373?v=4&w=50&h=50&mask=circle)](https://github.com/NotMatthewGriffin)[![34498039](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/34498039?v=4&w=50&h=50&mask=circle)](https://github.com/matheusMoreno)[![20173739](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/20173739?v=4&w=50&h=50&mask=circle)](https://github.com/madhur-tandon)[![4410453](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4410453?v=4&w=50&h=50&mask=circle)](https://github.com/mdjong1)[![113847439](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/113847439?v=4&w=50&h=50&mask=circle)](https://github.com/LunarMarathon)[![131469540](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/131469540?v=4&w=50&h=50&mask=circle)](https://github.com/knordstrom-muon)[![488594](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/488594?v=4&w=50&h=50&mask=circle)](https://github.com/jcugat)[![6984748](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6984748?v=4&w=50&h=50&mask=circle)](https://github.com/jbrambleDC)[![28351896](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/28351896?v=4&w=50&h=50&mask=circle)](https://github.com/JasonZhu1313)[![1274471](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1274471?v=4&w=50&h=50&mask=circle)](https://github.com/Sovietaced)[![7358951](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7358951?v=4&w=50&h=50&mask=circle)](https://github.com/frsann)[![121866694](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/121866694?v=4&w=50&h=50&mask=circle)](https://github.com/franco-bocci)[![1530049](https://images.weserv.nl/?url=https://avatars.githubusercont
ent.com/u/1530049?v=4&w=50&h=50&mask=circle)](https://github.com/felixmulder)[![111539728](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/111539728?v=4&w=50&h=50&mask=circle)](https://github.com/ddl-ebrown)[![23107192](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/23107192?v=4&w=50&h=50&mask=circle)](https://github.com/YmirKhang)[![6596957](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6596957?v=4&w=50&h=50&mask=circle)](https://github.com/elibixby)[![173942673](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/173942673?v=4&w=50&h=50&mask=circle)](https://github.com/dylanspag-lmco)[![103009868](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/103009868?v=4&w=50&h=50&mask=circle)](https://github.com/douenergy)[![6774758](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6774758?v=4&w=50&h=50&mask=circle)](https://github.com/ddhirajkumar)[![50860453](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/50860453?v=4&w=50&h=50&mask=circle)](https://github.com/charlie0220)[![6506810](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6506810?v=4&w=50&h=50&mask=circle)](https://github.com/stephen37)[![6610300](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6610300?v=4&w=50&h=50&mask=circle)](https://github.com/ursucarina)[![55718143](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/55718143?v=4&w=50&h=50&mask=circle)](https://github.com/anrusina)[![65977800](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/65977800?v=4&w=50&h=50&mask=circle)](https://github.com/service-github-lyft-semantic-release)[![84735036](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/84735036?v=4&w=50&h=50&mask=circle)](https://github.com/jsonporter)[![85753828](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/85753828?v=4&w=50&h=50&mask=circle)](https://github.com/govalt)[![105876962](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/105876962?v=4&w=50&h=50&mask=circle)](https://github.com/james-union)[![101579322](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/101579322?v=4&w=50&h=50&mask=circle)](https://github.com/olga-union)[![26953709](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26953709?v=4&w=50&h=50&mask=circle)](https://github.com/Pianist038801)[![25038146](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25038146?v=4&w=50&h=50&mask=circle)](https://github.com/eugenejahn)[![88684372](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/88684372?v=4&w=50&h=50&mask=circle)](https://github.com/4nalog)[![8129392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8129392?v=4&w=50&h=50&mask=circle)](https://github.com/FrankFlitton)[![99441958](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/99441958?v=4&w=50&h=50&mask=circle)](https://github.com/apTalya)[![59022542](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/59022542?v=4&w=50&h=50&mask=circle)](https://github.com/lyonlu13)[![72861891](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/72861891?v=4&w=50&h=50&mask=circle)](https://github.com/xwk1246)[![1902623](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1902623?v=4&w=50&h=50&mask=circle)](https://github.com/trutx)[![59891164](https:
//images.weserv.nl/?url=https://avatars.githubusercontent.com/u/59891164?v=4&w=50&h=50&mask=circle)](https://github.com/K-Kumar-01)[![20668349](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/20668349?v=4&w=50&h=50&mask=circle)](https://github.com/HiromuHota)[![58770001](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/58770001?v=4&w=50&h=50&mask=circle)](https://github.com/Professional0321)[![1388071](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1388071?v=4&w=50&h=50&mask=circle)](https://github.com/aviaviavi)[![18363301](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18363301?v=4&w=50&h=50&mask=circle)](https://github.com/jimbobby5)[![25695302](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25695302?v=4&w=50&h=50&mask=circle)](https://github.com/sisco0)[![6399428](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6399428?v=4&w=50&h=50&mask=circle)](https://github.com/live-wire)[![17351764](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17351764?v=4&w=50&h=50&mask=circle)](https://github.com/daniel-shuy)[![31982395](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31982395?v=4&w=50&h=50&mask=circle)](https://github.com/alexapdev)[![7515359](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7515359?v=4&w=50&h=50&mask=circle)](https://github.com/narape)[![7548823](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7548823?v=4&w=50&h=50&mask=circle)](https://github.com/manuelrombach)[![50679871](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/50679871?v=4&w=50&h=50&mask=circle)](https://github.com/lupasarin)[![25364490](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25364490?v=4&w=50&h=50&mask=circle)](https://github.com/haoyuez)[![3451399](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3451399?v=4&w=50&h=50&mask=circle)](https://github.com/skiptomyliu)[![66767992](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/66767992?v=4&w=50&h=50&mask=circle)](https://github.com/10sharmashivam)[![62209650](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/62209650?v=4&w=50&h=50&mask=circle)](https://github.com/3t8)[![82604841](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/82604841?v=4&w=50&h=50&mask=circle)](https://github.com/davidmirror-ops)[![1892175](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1892175?v=4&w=50&h=50&mask=circle)](https://github.com/zeryx)[![66259759](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/66259759?v=4&w=50&h=50&mask=circle)](https://github.com/popojk)[![64233065](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/64233065?v=4&w=50&h=50&mask=circle)](https://github.com/rachfop)[![11166516](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11166516?v=4&w=50&h=50&mask=circle)](https://github.com/hebiao064)[![110307215](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/110307215?v=4&w=50&h=50&mask=circle)](https://github.com/sumana-2705)[![35962310](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/35962310?v=4&w=50&h=50&mask=circle)](https://github.com/trishitapingolia)[![91927689](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/91927689?v=4&w=50&h=50&mask=circle)](https://github.com/Smar
tmind12)[![726061](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/726061?v=4&w=50&h=50&mask=circle)](https://github.com/huxuan)[![42114946](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/42114946?v=4&w=50&h=50&mask=circle)](https://github.com/DenChenn)[![47872044](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/47872044?v=4&w=50&h=50&mask=circle)](https://github.com/privatedumbo)[![105229971](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/105229971?v=4&w=50&h=50&mask=circle)](https://github.com/tjKairos)[![200401](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/200401?v=4&w=50&h=50&mask=circle)](https://github.com/arturdryomov)[![13770222](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13770222?v=4&w=50&h=50&mask=circle)](https://github.com/ChickenTarm)[![117322020](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/117322020?v=4&w=50&h=50&mask=circle)](https://github.com/cdreetz)[![24739949](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24739949?v=4&w=50&h=50&mask=circle)](https://github.com/felixwang9817)[![64864908](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/64864908?v=4&w=50&h=50&mask=circle)](https://github.com/xshen8888)[![10430635](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10430635?v=4&w=50&h=50&mask=circle)](https://github.com/juandiegopalomino)[![31911175](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/31911175?v=4&w=50&h=50&mask=circle)](https://github.com/kanyesthaker)[![104152793](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/104152793?v=4&w=50&h=50&mask=circle)](https://github.com/marc-union)[![27818609](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/27818609?v=4&w=50&h=50&mask=circle)](https://github.com/michaeltinsley)[![22797900](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/22797900?v=4&w=50&h=50&mask=circle)](https://github.com/stolarczyk)[![6486584](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6486584?v=4&w=50&h=50&mask=circle)](https://github.com/mucahitkantepe)[![321459](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/321459?v=4&w=50&h=50&mask=circle)](https://github.com/oyevtushok)[![405480](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/405480?v=4&w=50&h=50&mask=circle)](https://github.com/georgesnelling)[![54046807](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/54046807?v=4&w=50&h=50&mask=circle)](https://github.com/kamaleybov)[![1004789](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1004789?v=4&w=50&h=50&mask=circle)](https://github.com/dschaller)[![1659415](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1659415?v=4&w=50&h=50&mask=circle)](https://github.com/dav009)[![1031759](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1031759?v=4&w=50&h=50&mask=circle)](https://github.com/agiron123)[![107633597](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/107633597?v=4&w=50&h=50&mask=circle)](https://github.com/peterghaddad)[![50983601](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/50983601?v=4&w=50&h=50&mask=circle)](https://github.com/zychen5186)[![136724527](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/136724527?v=4&w=
50&h=50&mask=circle)](https://github.com/Murdock9803)[![144381122](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/144381122?v=4&w=50&h=50&mask=circle)](https://github.com/vraiyaninv)[![24486999](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24486999?v=4&w=50&h=50&mask=circle)](https://github.com/suravshrestha)[![69161722](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/69161722?v=4&w=50&h=50&mask=circle)](https://github.com/noobkid2411)[![790725](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/790725?v=4&w=50&h=50&mask=circle)](https://github.com/rodrigobaron)[![43336767](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43336767?v=4&w=50&h=50&mask=circle)](https://github.com/yongchand)[![36594527](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36594527?v=4&w=50&h=50&mask=circle)](https://github.com/mishmanners)[![25391173](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25391173?v=4&w=50&h=50&mask=circle)](https://github.com/nicklofaso)[![86911142](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/86911142?v=4&w=50&h=50&mask=circle)](https://github.com/idivyanshbansal)[![380927](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/380927?v=4&w=50&h=50&mask=circle)](https://github.com/cpaulik)[![480621](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/480621?v=4&w=50&h=50&mask=circle)](https://github.com/davidxia)[![1335881](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1335881?v=4&w=50&h=50&mask=circle)](https://github.com/hoyajigi)[![100597998](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/100597998?v=4&w=50&h=50&mask=circle)](https://github.com/MrKrishnaAgarwal)[![4830700](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4830700?v=4&w=50&h=50&mask=circle)](https://github.com/NitinAgg)[![139771199](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/139771199?v=4&w=50&h=50&mask=circle)](https://github.com/taieeuu)[![33272587](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/33272587?v=4&w=50&h=50&mask=circle)](https://github.com/samuel-sujith)[![10438373](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10438373?v=4&w=50&h=50&mask=circle)](https://github.com/SKalt)[![24543401](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24543401?v=4&w=50&h=50&mask=circle)](https://github.com/asoundarya96)[![141538510](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/141538510?v=4&w=50&h=50&mask=circle)](https://github.com/SophieTech88)[![47355538](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/47355538?v=4&w=50&h=50&mask=circle)](https://github.com/siiddhantt)[![54034701](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/54034701?v=4&w=50&h=50&mask=circle)](https://github.com/peterxcli)[![580328](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/580328?v=4&w=50&h=50&mask=circle)](https://github.com/ilikedata)[![26265392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26265392?v=4&w=50&h=50&mask=circle)](https://github.com/ttanay)[![7144772](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7144772?v=4&w=50&h=50&mask=circle)](https://github.com/sighingnow)[![61864060](https://images.weserv.nl/?url=https://avatars.github
usercontent.com/u/61864060?v=4&w=50&h=50&mask=circle)](https://github.com/HuangTing-Yao)[![1027207](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1027207?v=4&w=50&h=50&mask=circle)](https://github.com/orf)[![78115767](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/78115767?v=4&w=50&h=50&mask=circle)](https://github.com/trevormcguire)[![8755869](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8755869?v=4&w=50&h=50&mask=circle)](https://github.com/paravatha)[![141313113](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/141313113?v=4&w=50&h=50&mask=circle)](https://github.com/robert-ulbrich-mercedes-benz)[![6528449](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6528449?v=4&w=50&h=50&mask=circle)](https://github.com/uschi2000)[![576968](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/576968?v=4&w=50&h=50&mask=circle)](https://github.com/ronaldosaheki)[![10095462](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10095462?v=4&w=50&h=50&mask=circle)](https://github.com/GRomR1)[![144255851](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/144255851?v=4&w=50&h=50&mask=circle)](https://github.com/Sennuno)[![36827492](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36827492?v=4&w=50&h=50&mask=circle)](https://github.com/shahwar9)[![34468461](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/34468461?v=4&w=50&h=50&mask=circle)](https://github.com/sshardool)[![1908193](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1908193?v=4&w=50&h=50&mask=circle)](https://github.com/shengyu7697)[![133936](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/133936?v=4&w=50&h=50&mask=circle)](https://github.com/shihgianlee)[![119912892](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/119912892?v=4&w=50&h=50&mask=circle)](https://github.com/Virtual4087)[![46835608](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/46835608?v=4&w=50&h=50&mask=circle)](https://github.com/shreyas44)[![40143026](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/40143026?v=4&w=50&h=50&mask=circle)](https://github.com/hampusrosvall)[![77197126](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/77197126?v=4&w=50&h=50&mask=circle)](https://github.com/hitarth01)[![300315](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/300315?v=4&w=50&h=50&mask=circle)](https://github.com/jcourteau)[![1220444](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1220444?v=4&w=50&h=50&mask=circle)](https://github.com/jkhales)[![106815366](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/106815366?v=4&w=50&h=50&mask=circle)](https://github.com/jw0515)[![1568889](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1568889?v=4&w=50&h=50&mask=circle)](https://github.com/leorleor)[![168411899](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/168411899?v=4&w=50&h=50&mask=circle)](https://github.com/mthemis-provenir)[![937967](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/937967?v=4&w=50&h=50&mask=circle)](https://github.com/moose007)[![73983677](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/73983677?v=4&w=50&h=50&mask=circle)](https://github.com/omahs)[![114232404](https://images.weserv.n
l/?url=https://avatars.githubusercontent.com/u/114232404?v=4&w=50&h=50&mask=circle)](https://github.com/sanjaychouhan-adf)[![11962777](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11962777?v=4&w=50&h=50&mask=circle)](https://github.com/ssen85)[![14996868](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/14996868?v=4&w=50&h=50&mask=circle)](https://github.com/v01dXYZ)[![93438190](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/93438190?v=4&w=50&h=50&mask=circle)](https://github.com/wanderer163)[![16526627](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16526627?v=4&w=50&h=50&mask=circle)](https://github.com/vijaysaravana)[![697033](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/697033?v=4&w=50&h=50&mask=circle)](https://github.com/vglocus)[![2272137](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2272137?v=4&w=50&h=50&mask=circle)](https://github.com/Dlougach)[![39889](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/39889?v=4&w=50&h=50&mask=circle)](https://github.com/yarikoptic)[![12821510](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/12821510?v=4&w=50&h=50&mask=circle)](https://github.com/ongkong)[![26526132](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26526132?v=4&w=50&h=50&mask=circle)](https://github.com/bearomorphism)[![43691987](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/43691987?v=4&w=50&h=50&mask=circle)](https://github.com/desihsu)[![5346764](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5346764?v=4&w=50&h=50&mask=circle)](https://github.com/fsz285)[![143190185](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/143190185?v=4&w=50&h=50&mask=circle)](https://github.com/gdabisias)[![22917741](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/22917741?v=4&w=50&h=50&mask=circle)](https://github.com/gigi-at-zymergen)[![11796986](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11796986?v=4&w=50&h=50&mask=circle)](https://github.com/avan-sh)[![489331](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/489331?v=4&w=50&h=50&mask=circle)](https://github.com/brndnblck)[![304786](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/304786?v=4&w=50&h=50&mask=circle)](https://github.com/kinow)[![156356273](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/156356273?v=4&w=50&h=50&mask=circle)](https://github.com/cratiu222)[![24402505](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/24402505?v=4&w=50&h=50&mask=circle)](https://github.com/Daeruin)[![102558755](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/102558755?v=4&w=50&h=50&mask=circle)](https://github.com/dyu-bot)[![146735585](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/146735585?v=4&w=50&h=50&mask=circle)](https://github.com/nnsW3)[![20135478](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/20135478?v=4&w=50&h=50&mask=circle)](https://github.com/Juneezee)[![1627021](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1627021?v=4&w=50&h=50&mask=circle)](https://github.com/EraYaN)[![11456773](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11456773?v=4&w=50&h=50&mask=circle)](https://github.com/fvde)[![64676594](https://images.weserv.nl/?url
=https://avatars.githubusercontent.com/u/64676594?v=4&w=50&h=50&mask=circle)](https://github.com/abhijeet007rocks8)[![132337675](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/132337675?v=4&w=50&h=50&mask=circle)](https://github.com/adarsh-jha-dev)[![1627770](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1627770?v=4&w=50&h=50&mask=circle)](https://github.com/amitani)[![128223364](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/128223364?v=4&w=50&h=50&mask=circle)](https://github.com/blindaks)[![66388192](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/66388192?v=4&w=50&h=50&mask=circle)](https://github.com/mounesi)[![13237080](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13237080?v=4&w=50&h=50&mask=circle)](https://github.com/aminmaghsodi)[![14992189](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/14992189?v=4&w=50&h=50&mask=circle)](https://github.com/eanakhl)[![1175392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1175392?v=4&w=50&h=50&mask=circle)](https://github.com/adinin)[![26172355](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/26172355?v=4&w=50&h=50&mask=circle)](https://github.com/ALMerrill)[![48056316](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/48056316?v=4&w=50&h=50&mask=circle)](https://github.com/ap0calypse8)[![7475946](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7475946?v=4&w=50&h=50&mask=circle)](https://github.com/anton-malakhov)[![1174730](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1174730?v=4&w=50&h=50&mask=circle)](https://github.com/mouuff)[![93093775](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/93093775?v=4&w=50&h=50&mask=circle)](https://github.com/Ash0807)[![44368997](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/44368997?v=4&w=50&h=50&mask=circle)](https://github.com/radiantly)[![16404204](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16404204?v=4&w=50&h=50&mask=circle)](https://github.com/Jeinhaus)[![3033592](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3033592?v=4&w=50&h=50&mask=circle)](https://github.com/kazesberger)[![13591898](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13591898?v=4&w=50&h=50&mask=circle)](https://github.com/lauralindy)[![19229049](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19229049?v=4&w=50&h=50&mask=circle)](https://github.com/lsena)[![123787712](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/123787712?v=4&w=50&h=50&mask=circle)](https://github.com/mark-thm)[![2236795](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2236795?v=4&w=50&h=50&mask=circle)](https://github.com/mhotan)[![10829864](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10829864?v=4&w=50&h=50&mask=circle)](https://github.com/mcanueste)[![36989112](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36989112?v=4&w=50&h=50&mask=circle)](https://github.com/nishantwrp)[![260015](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/260015?v=4&w=50&h=50&mask=circle)](https://github.com/ossareh)[![6987428](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6987428?v=4&w=50&h=50&mask=circle)](https://github.com/guyarad)[![1596283](https://images.weserv.
nl/?url=https://avatars.githubusercontent.com/u/1596283?v=4&w=50&h=50&mask=circle)](https://github.com/guy4261)[![7490199](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7490199?v=4&w=50&h=50&mask=circle)](https://github.com/Lundez)[![10345184](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10345184?v=4&w=50&h=50&mask=circle)](https://github.com/hasukmistry)[![91054457](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/91054457?v=4&w=50&h=50&mask=circle)](https://github.com/HeetVekariya)[![29532638](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/29532638?v=4&w=50&h=50&mask=circle)](https://github.com/rokrokss)[![22633385](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/22633385?v=4&w=50&h=50&mask=circle)](https://github.com/eltociear)[![151841](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/151841?v=4&w=50&h=50&mask=circle)](https://github.com/goodgravy)[![46633758](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/46633758?v=4&w=50&h=50&mask=circle)](https://github.com/jsong336)[![14008978](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/14008978?v=4&w=50&h=50&mask=circle)](https://github.com/jeremydonahue)[![9272376](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/9272376?v=4&w=50&h=50&mask=circle)](https://github.com/jonasdebeukelaer)[![1633460](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1633460?v=4&w=50&h=50&mask=circle)](https://github.com/jmcarp)[![1043051](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1043051?v=4&w=50&h=50&mask=circle)](https://github.com/kylewaynebenson)[![21953442](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/21953442?v=4&w=50&h=50&mask=circle)](https://github.com/Gui11aum3)[![16461847](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16461847?v=4&w=50&h=50&mask=circle)](https://github.com/JakeNeyer)[![299421](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/299421?v=4&w=50&h=50&mask=circle)](https://github.com/aliavni)[![2845540](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2845540?v=4&w=50&h=50&mask=circle)](https://github.com/RustedBones)[![4056828](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4056828?v=4&w=50&h=50&mask=circle)](https://github.com/pablocasares)[![138898](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/138898?v=4&w=50&h=50&mask=circle)](https://github.com/andyczerwonka)[![150935185](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/150935185?v=4&w=50&h=50&mask=circle)](https://github.com/jschuchart-spot)[![471021](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/471021?v=4&w=50&h=50&mask=circle)](https://github.com/marschall)[![5732047](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5732047?v=4&w=50&h=50&mask=circle)](https://github.com/stormy-ua)[![1071153](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1071153?v=4&w=50&h=50&mask=circle)](https://github.com/evdokim)[![13670774](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/13670774?v=4&w=50&h=50&mask=circle)](https://github.com/AndersonReyes)[![438217](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/438217?v=4&w=50&h=50&mask=circle)](https://github.com/acet)[![71284190](https://images
.weserv.nl/?url=https://avatars.githubusercontent.com/u/71284190?v=4&w=50&h=50&mask=circle)](https://github.com/gdungca-fn)[![85021780](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/85021780?v=4&w=50&h=50&mask=circle)](https://github.com/Abdullahi-Ahmed)[![48512530](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/48512530?v=4&w=50&h=50&mask=circle)](https://github.com/amaleelhamri)[![3275593](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3275593?v=4&w=50&h=50&mask=circle)](https://github.com/pradyunsg)[![66853113](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/in/68672?v=4&w=50&h=50&mask=circle)](https://github.com/apps/pre-commit-ci)[![1834509](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1834509?v=4&w=50&h=50&mask=circle)](https://github.com/jdknight)[![107893](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/107893?v=4&w=50&h=50&mask=circle)](https://github.com/kmike)[![1324225](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1324225?v=4&w=50&h=50&mask=circle)](https://github.com/hugovk)[![1300022](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1300022?v=4&w=50&h=50&mask=circle)](https://github.com/sirosen)[![244656](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/244656?v=4&w=50&h=50&mask=circle)](https://github.com/humitos)[![467294](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/467294?v=4&w=50&h=50&mask=circle)](https://github.com/bastimeyer)[![71486](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/71486?v=4&w=50&h=50&mask=circle)](https://github.com/asmeurer)[![20280470](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/20280470?v=4&w=50&h=50&mask=circle)](https://github.com/drewyh)[![3533182](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3533182?v=4&w=50&h=50&mask=circle)](https://github.com/polyzen)[![199429](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/199429?v=4&w=50&h=50&mask=circle)](https://github.com/dvarrazzo)[![1032633](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1032633?v=4&w=50&h=50&mask=circle)](https://github.com/dbitouze)[![1313087](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1313087?v=4&w=50&h=50&mask=circle)](https://github.com/idryzhov)[![521097](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/521097?v=4&w=50&h=50&mask=circle)](https://github.com/pauloxnet)[![63936253](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/63936253?v=4&w=50&h=50&mask=circle)](https://github.com/ichard26)[![18519037](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18519037?v=4&w=50&h=50&mask=circle)](https://github.com/sethmlarson)[![413772](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/413772?v=4&w=50&h=50&mask=circle)](https://github.com/graingert)[![11478411](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11478411?v=4&w=50&h=50&mask=circle)](https://github.com/stonecharioteer)[![6739793](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6739793?v=4&w=50&h=50&mask=circle)](https://github.com/yeraydiazdiaz)[![83365562](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/83365562?v=4&w=50&h=50&mask=circle)](https://github.com/eviau-sat)[![6670894](https://images.weserv.nl/?url=h
ttps://avatars.githubusercontent.com/u/6670894?v=4&w=50&h=50&mask=circle)](https://github.com/rozsasarpi)[![86675](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/86675?v=4&w=50&h=50&mask=circle)](https://github.com/estan)[![4748863](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4748863?v=4&w=50&h=50&mask=circle)](https://github.com/pseudomuto)[![181308](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/181308?v=4&w=50&h=50&mask=circle)](https://github.com/htdvisser)[![1390277](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1390277?v=4&w=50&h=50&mask=circle)](https://github.com/jacobtolar)[![1391982](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1391982?v=4&w=50&h=50&mask=circle)](https://github.com/ezimanyi)[![135130171](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/135130171?v=4&w=50&h=50&mask=circle)](https://github.com/hmacias-avaya)[![3880001](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3880001?v=4&w=50&h=50&mask=circle)](https://github.com/lpabon)[![770392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/770392?v=4&w=50&h=50&mask=circle)](https://github.com/ArcEye)[![6178510](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6178510?v=4&w=50&h=50&mask=circle)](https://github.com/mingrammer)[![5111931](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5111931?v=4&w=50&h=50&mask=circle)](https://github.com/aschrijver)[![148219809](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/148219809?v=4&w=50&h=50&mask=circle)](https://github.com/panzerfahrer)[![16724](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16724?v=4&w=50&h=50&mask=circle)](https://github.com/glasser)[![17330872](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17330872?v=4&w=50&h=50&mask=circle)](https://github.com/murph0)[![419419](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/419419?v=4&w=50&h=50&mask=circle)](https://github.com/zetaron)[![1014](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1014?v=4&w=50&h=50&mask=circle)](https://github.com/sunfmin)[![504507](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/504507?v=4&w=50&h=50&mask=circle)](https://github.com/guozheng)[![8841470](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8841470?v=4&w=50&h=50&mask=circle)](https://github.com/suusan2go)[![901479](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/901479?v=4&w=50&h=50&mask=circle)](https://github.com/mhaberler)[![353644](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/353644?v=4&w=50&h=50&mask=circle)](https://github.com/dreampuf)[![12421077](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/12421077?v=4&w=50&h=50&mask=circle)](https://github.com/UnicodingUnicorn)[![809865](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/809865?v=4&w=50&h=50&mask=circle)](https://github.com/philiptzou)[![19378](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/19378?v=4&w=50&h=50&mask=circle)](https://github.com/timabell)[![614934](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/614934?v=4&w=50&h=50&mask=circle)](https://github.com/adzenith)[![1113245](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1113245?v=4&w=
50&h=50&mask=circle)](https://github.com/jasonhancock)[![101659](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/101659?v=4&w=50&h=50&mask=circle)](https://github.com/matryer)[![4730508](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/4730508?v=4&w=50&h=50&mask=circle)](https://github.com/piotrrojek)[![33036160](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/33036160?v=4&w=50&h=50&mask=circle)](https://github.com/jasonsattler)[![470810](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/470810?v=4&w=50&h=50&mask=circle)](https://github.com/sbward)[![7592392](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7592392?v=4&w=50&h=50&mask=circle)](https://github.com/Pisush)[![94814](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/94814?v=4&w=50&h=50&mask=circle)](https://github.com/tamalsaha)[![8147854](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8147854?v=4&w=50&h=50&mask=circle)](https://github.com/marianina8)[![1683714](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1683714?v=4&w=50&h=50&mask=circle)](https://github.com/naysayer)[![2807589](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2807589?v=4&w=50&h=50&mask=circle)](https://github.com/darwayne)[![17263167](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17263167?v=4&w=50&h=50&mask=circle)](https://github.com/jsteenb2)[![1005](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1005?v=4&w=50&h=50&mask=circle)](https://github.com/ernesto-jimenez)[![6386887](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6386887?v=4&w=50&h=50&mask=circle)](https://github.com/AgrimPrasad)[![615811](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/615811?v=4&w=50&h=50&mask=circle)](https://github.com/dahernan)[![75184](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/75184?v=4&w=50&h=50&mask=circle)](https://github.com/jtarchie)[![469669](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/469669?v=4&w=50&h=50&mask=circle)](https://github.com/jdtobe)[![28523](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/28523?v=4&w=50&h=50&mask=circle)](https://github.com/alrs)[![426880](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/426880?v=4&w=50&h=50&mask=circle)](https://github.com/tkent)[![10113228](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10113228?v=4&w=50&h=50&mask=circle)](https://github.com/urisimchoni)[![5751464](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5751464?v=4&w=50&h=50&mask=circle)](https://github.com/Xercoy)[![2405410](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2405410?v=4&w=50&h=50&mask=circle)](https://github.com/marbergq)[![5082160](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5082160?v=4&w=50&h=50&mask=circle)](https://github.com/anothrNick)[![11335612](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/11335612?v=4&w=50&h=50&mask=circle)](https://github.com/fermoya)[![23391642](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/23391642?v=4&w=50&h=50&mask=circle)](https://github.com/sbe-arg)[![1024762](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/1024762?v=4&w=50&h=50&mask=circle)](https://github.com/PeerXu)[![7390781]
(https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/7390781?v=4&w=50&h=50&mask=circle)](https://github.com/reececomo)[![49680](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/49680?v=4&w=50&h=50&mask=circle)](https://github.com/dmerrick)[![87524](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/87524?v=4&w=50&h=50&mask=circle)](https://github.com/andrewcole)[![866505](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/866505?v=4&w=50&h=50&mask=circle)](https://github.com/phish108)[![2611549](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2611549?v=4&w=50&h=50&mask=circle)](https://github.com/endrjuskr)[![8232503](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8232503?v=4&w=50&h=50&mask=circle)](https://github.com/sjauld)[![118945041](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/118945041?v=4&w=50&h=50&mask=circle)](https://github.com/vq-ambiata)[![3807434](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3807434?v=4&w=50&h=50&mask=circle)](https://github.com/tomsolem)[![16513382](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16513382?v=4&w=50&h=50&mask=circle)](https://github.com/117)[![8320753](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8320753?v=4&w=50&h=50&mask=circle)](https://github.com/lovromazgon)[![5655837](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5655837?v=4&w=50&h=50&mask=circle)](https://github.com/gukoff)[![49961058](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/49961058?v=4&w=50&h=50&mask=circle)](https://github.com/bevans-HD)[![25625597](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25625597?v=4&w=50&h=50&mask=circle)](https://github.com/zero-below)[![62775347](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/62775347?v=4&w=50&h=50&mask=circle)](https://github.com/okozachenko1203)[![53085803](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/53085803?v=4&w=50&h=50&mask=circle)](https://github.com/cuttingedge1109)[![5067549](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/5067549?v=4&w=50&h=50&mask=circle)](https://github.com/pellared)[![25486791](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/25486791?v=4&w=50&h=50&mask=circle)](https://github.com/pavyarov)[![995707](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/995707?v=4&w=50&h=50&mask=circle)](https://github.com/OskarStark)[![2302957](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/2302957?v=4&w=50&h=50&mask=circle)](https://github.com/JeremyLWright)[![10090384](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/10090384?v=4&w=50&h=50&mask=circle)](https://github.com/ivanpk)[![17337515](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/17337515?v=4&w=50&h=50&mask=circle)](https://github.com/fabricepipart)[![8296645](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/8296645?v=4&w=50&h=50&mask=circle)](https://github.com/imdanielsp)[![6388483](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/6388483?v=4&w=50&h=50&mask=circle)](https://github.com/zsedem)[![69170839](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/69170839?v=4&w=50&h=50&mask=circle)](https://github.com/adam-berrio)[![282792](https://ima
ges.weserv.nl/?url=https://avatars.githubusercontent.com/u/282792?v=4&w=50&h=50&mask=circle)](https://github.com/asford)[![38894122](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/38894122?v=4&w=50&h=50&mask=circle)](https://github.com/bmcconeghy)[![16698198](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/16698198?v=4&w=50&h=50&mask=circle)](https://github.com/conda-forge-admin)[![36490558](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/36490558?v=4&w=50&h=50&mask=circle)](https://github.com/regro-cf-autotick-bot)[![79913779](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/in/102928?v=4&w=50&h=50&mask=circle)](https://github.com/apps/conda-forge-curator)[![41898282](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/in/15368?v=4&w=50&h=50&mask=circle)](https://github.com/apps/github-actions)[![18567580](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/18567580?v=4&w=50&h=50&mask=circle)](https://github.com/conda-forge-linter)[![72671586](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/72671586?v=4&w=50&h=50&mask=circle)](https://github.com/pheianox)[![3760025](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/3760025?v=4&w=50&h=50&mask=circle)](https://github.com/gaga5lala)[![115705553](https://images.weserv.nl/?url=https://avatars.githubusercontent.com/u/115705553?v=4&w=50&h=50&mask=circle)](https://github.com/divyank000)

## License

diff --git a/boilerplate/flyte/golang_support_tools/go.mod b/boilerplate/flyte/golang_support_tools/go.mod index c7676dd907..469b42d079 100644 --- a/boilerplate/flyte/golang_support_tools/go.mod +++ b/boilerplate/flyte/golang_support_tools/go.mod @@ -1,12 +1,12 @@ module github.com/flyteorg/boilerplate -go 1.22 +go 1.22.1 require ( github.com/EngHabu/mockery v0.0.0-20220916190332-dde70e38baba github.com/alvaroloes/enumer v1.1.2 github.com/flyteorg/flyte/flytestdlib v1.11.0 - github.com/golangci/golangci-lint v1.53.3 + github.com/golangci/golangci-lint v1.61.0 github.com/pseudomuto/protoc-gen-doc v1.4.1 github.com/vektra/mockery/v2 v2.40.3 ) @@ -14,191 +14,198 @@ require ( require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/storage v1.36.0 // indirect - github.com/4meepo/tagalign v1.2.2 // indirect - github.com/Abirdcfly/dupword v0.0.11 // indirect - github.com/Antonboom/errname v0.1.10 // indirect - github.com/Antonboom/nilnil v0.1.5 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.2.0 // indirect + cloud.google.com/go/storage v1.43.0 // indirect + github.com/4meepo/tagalign v1.3.4 // indirect + github.com/Abirdcfly/dupword v0.1.1 // indirect + github.com/Antonboom/errname v0.1.13 // indirect + github.com/Antonboom/nilnil v0.1.9 // indirect + github.com/Antonboom/testifylint v1.4.3 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect + github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect + github.com/Crocmagnon/fatcontext v0.5.2 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Masterminds/sprig v2.15.0+incompatible // indirect - github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect - github.com/alexkohler/nakedret/v2 v2.0.2 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect + github.com/alecthomas/go-check-sumtype v0.1.4 // indirect + github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/aokoli/goutils v1.0.1 // indirect - github.com/ashanbrown/forbidigo v1.5.3 // indirect + github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect github.com/aws/aws-sdk-go v1.44.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.1 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v3 v3.4.0 // indirect - github.com/breml/bidichk v0.2.4 // indirect - github.com/breml/errchkjson v0.3.1 // indirect - github.com/butuzov/ireturn v0.2.0 // indirect - github.com/butuzov/mirror v1.1.0 // indirect + github.com/bombsimon/wsl/v4 v4.4.1 // indirect + github.com/breml/bidichk v0.2.7 // indirect + github.com/breml/errchkjson v0.3.6 // indirect + github.com/butuzov/ireturn v0.3.0 // indirect + github.com/butuzov/mirror v1.2.0 // indirect + github.com/catenacyber/perfsprint v0.7.1 // indirect + github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect - github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect + github.com/chavacava/garif v0.1.0 // indirect github.com/chigopher/pathlib v0.19.1 // indirect + github.com/ckaznocha/intrange v0.2.0 // indirect github.com/coocood/freecache v1.1.1 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.10.1 // indirect + github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/denis-tingaikin/go-header v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607 // indirect - github.com/esimonov/ifshort v1.0.4 // indirect - github.com/ettle/strcase v0.1.1 // indirect + github.com/ettle/strcase v0.2.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fatih/color v1.15.0 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/firefart/nonamedreturns v1.0.4 // indirect + github.com/firefart/nonamedreturns v1.0.5 // indirect github.com/flyteorg/stow 
v0.3.10 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-critic/go-critic v0.8.1 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/ghostiam/protogetter v0.3.6 // indirect + github.com/go-critic/go-critic v0.11.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect - github.com/go-toolsmith/astequal v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.2.0 // indirect github.com/go-toolsmith/astfmt v1.1.0 // indirect github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect - github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect - github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect - github.com/golangci/misspell v0.4.0 // indirect - github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect - github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect + github.com/golangci/misspell v0.6.0 // indirect + github.com/golangci/modinfo v0.3.4 // indirect + github.com/golangci/plugin-module-register v0.1.1 // indirect + github.com/golangci/revgrep v0.5.3 // indirect + github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect + github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect + 
github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jgautheron/goconst v1.7.1 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jinzhu/copier v0.3.5 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/jjti/go-spancheck v0.6.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/julz/importas v0.1.0 // indirect - github.com/kisielk/errcheck v1.6.3 // indirect - github.com/kisielk/gotool v1.0.0 // indirect - github.com/kkHAIKE/contextcheck v1.1.4 // indirect + github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect + github.com/kisielk/errcheck v1.7.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.5 // indirect github.com/kulti/thelper v0.6.3 // indirect - github.com/kunwardeep/paralleltest v1.0.7 // indirect + github.com/kunwardeep/paralleltest v1.0.10 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/kyoh86/exportloopref v0.1.11 // indirect - github.com/ldez/gomoddirectives v0.2.3 // indirect + github.com/lasiar/canonicalheader v1.1.1 // indirect + github.com/ldez/gomoddirectives v0.2.4 // indirect github.com/ldez/tagliatelle v0.5.0 // indirect - github.com/leonklingele/grouper v1.1.1 // indirect + github.com/leonklingele/grouper v1.1.2 // indirect github.com/lufeee/execinquery v1.2.1 // indirect + github.com/macabu/inamedparam v0.1.3 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/revive v1.3.2 // indirect + github.com/mgechev/revive v1.3.9 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/moricho/tparallel v0.3.1 // indirect + github.com/moricho/tparallel v0.3.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007 // indirect github.com/nakabonne/nestif v0.3.1 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect github.com/ncw/swift v1.0.53 // indirect - github.com/nishanths/exhaustive v0.11.0 // indirect + github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.12.1 // indirect + github.com/nunnatsa/ginkgolinter v0.16.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pascaldekloe/name 
v0.0.0-20180628100202-0fd16699aae1 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polyfloyd/go-errorlint v1.4.2 // indirect + github.com/polyfloyd/go-errorlint v1.6.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/pseudomuto/protokit v0.2.0 // indirect - github.com/quasilyte/go-ruleguard v0.3.19 // indirect + github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect + github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rs/zerolog v1.29.0 // indirect - github.com/ryancurrah/gomodguard v1.3.0 // indirect - github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect + github.com/ryancurrah/gomodguard v1.3.5 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect + github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect - github.com/securego/gosec/v2 v2.16.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect + github.com/securego/gosec/v2 v2.21.2 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect - github.com/sivchari/nosnakecase v1.7.0 // indirect - github.com/sivchari/tenv v1.7.1 // indirect + github.com/sivchari/tenv v1.10.0 // indirect github.com/sonatard/noctx v0.0.2 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect - github.com/spf13/afero v1.10.0 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.15.0 // indirect @@ -207,67 +214,65 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/testify v1.9.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect - github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tetafro/godot v1.4.11 // indirect + github.com/tetafro/godot v1.4.17 // indirect github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect github.com/timonwong/loggercheck v0.9.4 // indirect - github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.9.0 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/ultraware/funlen v0.0.3 // indirect - github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.0.6 // indirect - github.com/xen0n/gosmopolitan v1.2.1 // indirect + github.com/ultraware/funlen v0.1.0 // indirect + github.com/ultraware/whitespace v0.1.1 // indirect + github.com/uudashr/gocognit v1.1.3 
// indirect + github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/yagipy/maintidx v1.0.0 // indirect - github.com/yeya24/promlinter v0.2.0 // indirect - github.com/ykadowak/zerologlint v0.1.2 // indirect - gitlab.com/bosi/decorder v0.2.3 // indirect + github.com/yeya24/promlinter v0.3.0 // indirect + github.com/ykadowak/zerologlint v0.1.5 // indirect + gitlab.com/bosi/decorder v0.4.2 // indirect + go-simpler.org/musttag v0.12.2 // indirect + go-simpler.org/sloglint v0.7.2 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect - go.tmz.dev/musttag v0.7.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect + go.uber.org/automaxprocs v1.5.3 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.25.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect - golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - google.golang.org/api v0.155.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/grpc v1.62.1 // indirect - google.golang.org/protobuf v1.33.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect + golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.6.0 // indirect + golang.org/x/tools v0.24.0 // indirect + google.golang.org/api v0.196.0 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.0 // indirect + google.golang.org/protobuf v1.34.2 
// indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.4.3 // indirect + honnef.co/go/tools v0.5.1 // indirect k8s.io/api v0.28.2 // indirect k8s.io/apimachinery v0.28.2 // indirect k8s.io/client-go v0.28.1 // indirect k8s.io/klog/v2 v2.100.1 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect - mvdan.cc/gofumpt v0.5.0 // indirect - mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect - mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect - mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect + mvdan.cc/gofumpt v0.7.0 // indirect + mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect sigs.k8s.io/controller-runtime v0.0.0-00010101000000-000000000000 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/boilerplate/flyte/golang_support_tools/go.sum b/boilerplate/flyte/golang_support_tools/go.sum index 6aa6b8d969..fc017b6f44 100644 --- a/boilerplate/flyte/golang_support_tools/go.sum +++ b/boilerplate/flyte/golang_support_tools/go.sum @@ -3,61 +3,30 @@ 4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= 4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery 
v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= -cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/4meepo/tagalign v1.2.2 h1:kQeUTkFTaBRtd/7jm8OKJl9iHk0gAO+TDFPHGSna0aw= -github.com/4meepo/tagalign v1.2.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= -github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU= -github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA= -github.com/Antonboom/errname v0.1.10 h1:RZ7cYo/GuZqjr1nuJLNe8ZH+a+Jd9DaZzttWzak9Bls= -github.com/Antonboom/errname v0.1.10/go.mod h1:xLeiCIrvVNpUtsN0wxAh05bNIZpqE22/qDMnTBTttiA= -github.com/Antonboom/nilnil v0.1.5 h1:X2JAdEVcbPaOom2TUa1FxZ3uyuUlex0XMLGYMemu6l0= -github.com/Antonboom/nilnil v0.1.5/go.mod h1:I24toVuBKhfP5teihGWctrRiPbRKHwZIFOvc6v3HZXk= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod 
h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= +cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8= +github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0= +github.com/Abirdcfly/dupword v0.1.1 h1:Bsxe0fIw6OwBtXMIncaTxCLHYO5BB+3mcsR5E8VXloY= +github.com/Abirdcfly/dupword v0.1.1/go.mod h1:B49AcJdTYYkpd4HjgAcutNGG9HZ2JWwKunH9Y2BA6sM= +github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM= +github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns= +github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ= +github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ= +github.com/Antonboom/testifylint v1.4.3 h1:ohMt6AHuHgttaQ1xb6SSnxCeK4/rnK7KKzbvs7DmEck= +github.com/Antonboom/testifylint v1.4.3/go.mod h1:+8Q9+AOLsz5ZiQiiYujJKs9mNz398+M6UgslP4qgJLA= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= @@ -71,31 +40,35 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfB github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/Crocmagnon/fatcontext v0.5.2 h1:vhSEg8Gqng8awhPju2w7MKHqMlg4/NI+gSDHtR3xgwA= +github.com/Crocmagnon/fatcontext v0.5.2/go.mod h1:87XhRMaInHP44Q7Tlc7jkgKKB7kZAOPiDkFMdKCC+74= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/EngHabu/mockery v0.0.0-20220916190332-dde70e38baba h1:HDBbUo0odjuCCtStDS//vNd3CeP1GdjQVhFmSZLnFwU= github.com/EngHabu/mockery v0.0.0-20220916190332-dde70e38baba/go.mod h1:DjqxgJ6VUERvvVE41d4Rrn72K29MXwk9ziY18bi36BU= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod 
h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig v2.15.0+incompatible h1:0gSxPGWS9PAr7U2NsQ2YQg6juRDINkUyuvbb4b2Xm8w= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY= -github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE= -github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= +github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= +github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c= +github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg= +github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= @@ -104,98 +77,89 @@ github.com/alvaroloes/enumer v1.1.2 h1:5khqHB33TZy1GWCO/lZwcroBFh7u+0j40T83VUbfA github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo= github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg= 
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/ashanbrown/forbidigo v1.5.3 h1:jfg+fkm/snMx+V9FBwsl1d340BV/99kZGv5jN9hBoXk= -github.com/ashanbrown/forbidigo v1.5.3/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/aws/aws-sdk-go v1.44.2 h1:5VBk5r06bgxgRKVaUtm1/4NT/rtrnH2E4cnAYv5zgQc= github.com/aws/aws-sdk-go v1.44.2/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= -github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= -github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8= -github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s= -github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ= -github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U= -github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4= -github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= -github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= -github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= +github.com/bombsimon/wsl/v4 v4.4.1 h1:jfUaCkN+aUpobrMO24zwyAMwMAV5eSziCkOKEauOLdw= +github.com/bombsimon/wsl/v4 v4.4.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= +github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= +github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= +github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs= +github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ= +github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc= +github.com/catenacyber/perfsprint 
v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= +github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= -github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0= -github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo= +github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chigopher/pathlib v0.19.1 h1:RoLlUJc0CqBGwq239cilyhxPNLXTK+HXoASGyGznx5A= github.com/chigopher/pathlib v0.19.1/go.mod h1:tzC1dZLW8o33UQpWkNkhvPwL5n4yyFRFm/jL1YGWFvY= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/ckaznocha/intrange v0.2.0 h1:FykcZuJ8BD7oX93YbO1UY9oZtkRbp+1/kJcDjkefYLs= +github.com/ckaznocha/intrange v0.2.0/go.mod h1:r5I7nUlAAG56xmkOpw4XVr16BXhwYTUdcuRFeevn1oE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc= github.com/coocood/freecache v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod 
h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0= -github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= +github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c= +github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= -github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= +github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= +github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607 h1:cTavhURetDkezJCvxFggiyLeP40Mrk/TtVg2+ycw1Es= github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607/go.mod h1:Cg4fM0vhYWOZdgM7RIOSTRNIc8/VT7CXClC3Ni86lu4= -github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= -github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= -github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= 
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= -github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= +github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA= +github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw= github.com/flyteorg/flyte/flytestdlib v1.11.0 h1:DxM/sf6H0ong8LIjgh0YwXK+abnGV8kWVi6EgfVCkO8= github.com/flyteorg/flyte/flytestdlib v1.11.0/go.mod h1:AmgNCq/tGEDwVfilW1nFtgPQn8vQ9gcDu6SNwz1YY+M= github.com/flyteorg/protoc-gen-doc v1.4.2 h1:Otw0F+RHaPQ8XlpzhLLgjsCMcrAIcMO01Zh+ALe3rrE= @@ -210,21 +174,14 @@ github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-critic/go-critic v0.8.1 h1:16omCF1gN3gTzt4j4J6fKI/HnRojhEp+Eks6EuKw3vw= -github.com/go-critic/go-critic v0.8.1/go.mod h1:kpzXl09SIJX1cr9TB/g/sAG+eFEl7ZS9f9cqvZtyNl0= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk= +github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= +github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU= +github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= @@ -235,16 +192,19 @@ github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2Kv github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= -github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= @@ -256,37 +216,27 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -295,45 +245,35 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= -github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= -github.com/golangci/golangci-lint v1.53.3 h1:CUcRafczT4t1F+mvdkUm6KuOpxUZTl0yWN/rSU6sSMo= -github.com/golangci/golangci-lint v1.53.3/go.mod h1:W4Gg3ONq6p3Jl+0s/h9Gr0j7yEgHJWWZO2bHl2tBUXM= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0= -github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME= +github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE= +github.com/golangci/golangci-lint v1.61.0 h1:VvbOLaRVWmyxCnUIMTbf1kDsaJbTzH20FAMXTAlQGu8= +github.com/golangci/golangci-lint v1.61.0/go.mod h1:e4lztIrJJgLPhWvFPDkhiMwEFRrWlmFbrZea3FsJyN8= +github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= +github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= +github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA= +github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM= +github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= +github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= +github.com/golangci/revgrep v0.5.3 
h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs= +github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs= +github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -341,40 +281,22 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8= -github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/googleapis/enterprise-certificate-proxy v0.3.3 h1:QRje2j5GZimBzlbhGA2V2QlGNgL8G6e+wGo/+/2bWI0= +github.com/googleapis/enterprise-certificate-proxy v0.3.3/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= +github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.4.1/go.mod 
h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= @@ -387,16 +309,9 @@ github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= @@ -406,52 +321,40 @@ github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= -github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk= +github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jinzhu/copier 
v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jjti/go-spancheck v0.6.2 h1:iYtoxqPMzHUPp7St+5yA8+cONdyXD3ug6KK15n7Pklk= +github.com/jjti/go-spancheck v0.6.2/go.mod h1:+X7lvIrR5ZdUTkxFYqzJ0abr8Sb5LOo80uOhWNqIrYA= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos= +github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8= -github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= +github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= -github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg= +github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -461,20 +364,24 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= -github.com/kunwardeep/paralleltest v1.0.7 h1:2uCk94js0+nVNQoHZNLBkAR1DQJrVzw6T0RMzJn55dQ= -github.com/kunwardeep/paralleltest v1.0.7/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= +github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= -github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= -github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/lasiar/canonicalheader v1.1.1 h1:wC+dY9ZfiqiPwAexUApFush/csSPXeIi4QqyxXmng8I= +github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0= +github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg= +github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g= github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= -github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= -github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= +github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= +github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -492,17 +399,14 @@ github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/revive v1.3.2 h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U= -github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0= +github.com/mgechev/revive v1.3.9 h1:18Y3R4a2USSBF+QZKFQwVkBROUda7uoBlkEuBD+YD1A= +github.com/mgechev/revive v1.3.9/go.mod h1:+uxEIr5UH0TjXWHTno3xh4u7eg6jDpXKzQccA9UGhHU= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -510,140 +414,116 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= -github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= +github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007 h1:28i1IjGcx8AofiB4N3q5Yls55VEaitzuEPkFJEVgGkA= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0= -github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4= +github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.12.1 h1:vwOqb5Nu05OikTXqhvLdHCGcx5uthIYIl0t79UVrERQ= -github.com/nunnatsa/ginkgolinter v0.12.1/go.mod h1:AK8Ab1PypVrcGUusuKD8RDcl2KgsIwvNaaxAlyHSzso= +github.com/nunnatsa/ginkgolinter v0.16.2 h1:8iLqHIZvN4fTLDC0Ke9tbSZVcyVHoBs0HIbnVSxfHJk= +github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 
h1:/I3lTljEEDNYLho3/FUB7iD/oc2cEFgVmbHzV+O0PtU= github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ= -github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.4.2 h1:CU+O4181IxFDdPH6t/HT7IiDj1I7zxNi1RIUxYwn8d0= -github.com/polyfloyd/go-errorlint v1.4.2/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/polyfloyd/go-errorlint v1.6.0 h1:tftWV9DE7txiFzPpztTAwyoRLKNj9gpVm2cg8/OwcYY= +github.com/polyfloyd/go-errorlint v1.6.0/go.mod h1:HR7u8wuP1kb1NeN1zqTd1ZMlqUKPPHF+Id4vIPvDqVw= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= 
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/pseudomuto/protokit v0.2.0 h1:hlnBDcy3YEDXH7kc9gV+NLaN0cDzhDvD1s7Y6FZ8RpM= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-ruleguard v0.3.19 h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc= -github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= +github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= -github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= -github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI= -github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ= +github.com/ryancurrah/gomodguard v1.3.5 
h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU= +github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.23.0 h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0= -github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU= -github.com/securego/gosec/v2 v2.16.0 h1:Pi0JKoasQQ3NnoRao/ww/N/XdynIB9NRYYZT5CyOs5U= -github.com/securego/gosec/v2 v2.16.0/go.mod h1:xvLcVZqUfo4aAQu56TNv7/Ltz6emAOQAEsrZrt7uGlI= +github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI= +github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.21.2 h1:deZp5zmYf3TWwU7A7cR2+SolbTpZ3HQiwFqnzQyEl3M= +github.com/securego/gosec/v2 v2.21.2/go.mod h1:au33kg78rNseF5PwPnTWhuYBFf534bvJRvOrgZ/bFzU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= -github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= -github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= -github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= -github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/sivchari/tenv v1.10.0 h1:g/hzMA+dBCKqGXgW8AV/1xIWhAvDrx0zFKNR48NFMg0= +github.com/sivchari/tenv v1.10.0/go.mod h1:tdY24masnVoZFxYrHv/nD6Tc8FbkEtAQEEziXpyMgqY= github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= 
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= -github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -655,13 +535,11 @@ github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRk github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -670,45 +548,43 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv 
v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= -github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= -github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/tetafro/godot v1.4.17 h1:pGzu+Ye7ZUEFx7LHU0dAKmCOXWsPjl7qA6iMGndsjPs= +github.com/tetafro/godot v1.4.17/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= -github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= -github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= +github.com/tomarrell/wrapcheck/v2 v2.9.0 h1:801U2YCAjLhdN8zhZ/7tdjB3EnAoRlJHt/s+9hijLQ4= +github.com/tomarrell/wrapcheck/v2 v2.9.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= -github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= -github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= +github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ= +github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/uudashr/gocognit v1.1.3 h1:l+a111VcDbKfynh+airAy/DJQKaXh2m9vkoysMPSZyM= +github.com/uudashr/gocognit v1.1.3/go.mod h1:aKH8/e8xbTRBwjbCkwZ8qt4l2EpKXl31KMHgSS+lZ2U= github.com/vektra/mockery/v2 v2.40.3 h1:IZ2lydSDFsY0khnEsbSu13VLcqSsa6UYSS/8F+uOJmo= github.com/vektra/mockery/v2 v2.40.3/go.mod h1:KYBZF/7sqOa86BaOZPYsoCZWEWLS90a5oBLg2pVudxY= -github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw= 
-github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA= +github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= -github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= -github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= -github.com/ykadowak/zerologlint v0.1.2 h1:Um4P5RMmelfjQqQJKtE8ZW+dLZrXrENeIzWWKw800U4= -github.com/ykadowak/zerologlint v0.1.2/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs= +github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= +github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -716,36 +592,34 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= -gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= -go-simpler.org/assert v0.5.0 h1:+5L/lajuQtzmbtEfh69sr5cRf2/xZzyJhFjoOz/PPqs= -go-simpler.org/assert v0.5.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= +gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= +go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= +go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/musttag v0.12.2 h1:J7lRc2ysXOq7eM8rwaTYnNrHd5JwjppzB6mScysB2Cs= +go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM= +go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY= +go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 
h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 h1:Nw7Dv4lwvGrI68+wULbcq7su9K2cebeCUrDjVrUJHxM= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0/go.mod h1:1MsF6Y7gTqosgoZvHlzcaaM8DIMNZgJh87ykokoNH7Y= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.tmz.dev/musttag v0.7.0 h1:QfytzjTWGXZmChoX0L++7uQN+yRCPfyFm+whsM+lfGc= -go.tmz.dev/musttag v0.7.0/go.mod h1:oTFPvgOkJmp5kYL02S8+jrH0eLrBIl57rzWeA26zDEM= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -753,56 +627,25 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= 
go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 h1:J74nGeMgeFnYQJN59eFwh06jX/V8g0lB7LWpjSLxtgU= -golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= 
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= @@ -811,135 +654,63 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -948,93 +719,46 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524210228-3d17549cdc6b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= @@ -1042,113 +766,40 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod 
h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA= -google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk= +google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= +google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1157,43 +808,27 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 
v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= -honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= +honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= +honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= k8s.io/apiextensions-apiserver v0.28.0 h1:CszgmBL8CizEnj4sj7/PtLGey6Na3YgWyGCPONv7E9E= @@ -1210,17 +845,10 @@ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5Ohx k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= -mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w= -mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= 
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= +mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= +mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= +mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU= sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/boilerplate/flyte/golang_test_targets/Makefile b/boilerplate/flyte/golang_test_targets/Makefile index c02409a318..6492014917 100644 --- a/boilerplate/flyte/golang_test_targets/Makefile +++ b/boilerplate/flyte/golang_test_targets/Makefile @@ -15,7 +15,7 @@ generate: download_tooling #generate go code .PHONY: lint lint: download_tooling #lints the package for common code smells - GL_DEBUG=linters_output,env golangci-lint run $(LINT_FLAGS) --deadline=5m --exclude deprecated -v + GL_DEBUG=linters_output,env golangci-lint run $(LINT_FLAGS) --timeout=5m --exclude deprecated -v .PHONY: lint-fix lint-fix: LINT_FLAGS=--fix diff --git a/charts/flyte-binary/README.md b/charts/flyte-binary/README.md index e7df1018db..9ab746d48d 100644 --- a/charts/flyte-binary/README.md +++ b/charts/flyte-binary/README.md @@ -63,6 +63,7 @@ Chart for basic single Flyte executable deployment | configuration.logging.plugins.kubernetes.templateUri | string | `""` | | | configuration.logging.plugins.stackdriver.enabled | bool | `false` | | | configuration.logging.plugins.stackdriver.templateUri | string | `""` | | +| configuration.propeller.createCRDs | bool | `true` | | | configuration.storage.metadataContainer | string | `"my-organization-flyte-container"` | | | configuration.storage.provider | string | `"s3"` | | | configuration.storage.providerConfig.azure.account | string | `"storage-account-name"` | | diff --git a/charts/flyte-binary/templates/configmap.yaml b/charts/flyte-binary/templates/configmap.yaml index 255da9fdf3..3360fdc0c4 100644 --- a/charts/flyte-binary/templates/configmap.yaml +++ b/charts/flyte-binary/templates/configmap.yaml @@ -40,7 +40,7 @@ data: show-source: true level: {{ default 1 .Values.configuration.logging.level }} propeller: - create-flyteworkflow-crd: true + create-flyteworkflow-crd: {{ .Values.configuration.propeller.createCRDs }} webhook: certDir: /var/run/flyte/certs localCert: true diff --git a/charts/flyte-binary/templates/crds/flyteworkflow.yaml b/charts/flyte-binary/templates/crds/flyteworkflow.yaml new file mode 100644 index 0000000000..3e5167d6ac --- /dev/null +++ b/charts/flyte-binary/templates/crds/flyteworkflow.yaml @@ -0,0 +1,32 @@ +{{- if not .Values.configuration.propeller.createCRDs }} +{{- if $.Capabilities.APIVersions.Has "apiextensions.k8s.io/v1/CustomResourceDefinition" }} +apiVersion: apiextensions.k8s.io/v1 +{{- else }} +apiVersion: apiextensions.k8s.io/v1beta1 +{{- end }} +kind: CustomResourceDefinition +metadata: + name: flyteworkflows.flyte.lyft.com +spec: + group: flyte.lyft.com + names: + kind: FlyteWorkflow + plural: flyteworkflows + shortNames: + - fly + singular: flyteworkflow + scope: Namespaced +{{- if $.Capabilities.APIVersions.Has 
"apiextensions.k8s.io/v1/CustomResourceDefinition" }} + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: +{{- else }} + version: v1alpha1 +{{- end }} +{{- end }} diff --git a/charts/flyte-binary/values.yaml b/charts/flyte-binary/values.yaml index eee01d16c6..97fa1fe8d3 100644 --- a/charts/flyte-binary/values.yaml +++ b/charts/flyte-binary/values.yaml @@ -176,6 +176,10 @@ configuration: timeouts: GetTask: 10s defaultTimeout: 10s + # propeller Specify configuration for Flyte Propeller + propeller: + # createCRDs If true, Propeller will install CRDs at runtime, if false, CRDs will be installed during helm install + createCRDs: true # externalConfigMap Specify an existing, external ConfigMap to use as configuration for Flyte # If set, no Flyte configuration will be generated by this chart externalConfigMap: "" diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index 0ec5d92192..c633f19959 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -178,6 +178,7 @@ helm install gateway bitnami/contour -n flyte | flyteadmin.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flyteadmin.enabled | bool | `true` | | | flyteadmin.env | list | `[]` | Additional flyteadmin container environment variables e.g. SendGrid's API key - name: SENDGRID_API_KEY value: "" e.g. secret environment variable (you can combine it with .additionalVolumes): - name: SENDGRID_API_KEY valueFrom: secretKeyRef: name: sendgrid-secret key: api_key | +| flyteadmin.envFrom | list | `[]` | Additional flyteadmin environment variables from a reference (ie: Secret or ConfigMap) | | flyteadmin.extraArgs | object | `{}` | Appends extra command line arguments to the serve command | | flyteadmin.image.pullPolicy | string | `"IfNotPresent"` | | | flyteadmin.image.repository | string | `"cr.flyte.org/flyteorg/flyteadmin"` | Docker image for Flyteadmin deployment | @@ -191,7 +192,7 @@ helm install gateway bitnami/contour -n flyte | flyteadmin.resources | object | `{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}}` | Default resources requests and limits for Flyteadmin deployment | | flyteadmin.secrets | object | `{}` | | | flyteadmin.securityContext | object | `{"fsGroup":65534,"fsGroupChangePolicy":"Always","runAsNonRoot":true,"runAsUser":1001,"seLinuxOptions":{"type":"spc_t"}}` | Sets securityContext for flyteadmin pod(s). | -| flyteadmin.service | object | `{"additionalPorts":[],"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"}` | Service settings for Flyteadmin | +| flyteadmin.service | object | `{"additionalPorts":[],"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"appProtocols":{"enabled":false},"loadBalancerSourceRanges":[],"type":"ClusterIP"}` | Service settings for Flyteadmin | | flyteadmin.service.additionalPorts | list | `[]` | Appends additional ports to the service spec. 
| | flyteadmin.serviceAccount | object | `{"alwaysCreate":false,"annotations":{},"clusterRole":{"apiGroups":["","flyte.lyft.com","rbac.authorization.k8s.io"],"resources":["configmaps","flyteworkflows","namespaces","pods","resourcequotas","roles","rolebindings","secrets","services","serviceaccounts","spark-role","limitranges"],"verbs":["*"]},"create":true,"createClusterRole":true,"imagePullSecrets":[]}` | Configuration for service accounts for FlyteAdmin | | flyteadmin.serviceAccount.alwaysCreate | bool | `false` | Should a service account always be created for flyteadmin even without an actual flyteadmin deployment running (e.g. for multi-cluster setups) | @@ -234,7 +235,7 @@ helm install gateway bitnami/contour -n flyte | flyteconsole.replicaCount | int | `1` | Replicas count for Flyteconsole deployment | | flyteconsole.resources | object | `{"limits":{"cpu":"500m","memory":"250Mi"},"requests":{"cpu":"10m","memory":"50Mi"}}` | Default resources requests and limits for Flyteconsole deployment | | flyteconsole.securityContext | object | `{"fsGroupChangePolicy":"OnRootMismatch","runAsNonRoot":true,"runAsUser":1000,"seLinuxOptions":{"type":"spc_t"}}` | Sets securityContext for flyteconsole pod(s). | -| flyteconsole.service | object | `{"annotations":{},"type":"ClusterIP"}` | Service settings for Flyteconsole | +| flyteconsole.service | object | `{"annotations":{},"appProtocols":{"enabled":false},"type":"ClusterIP"}` | Service settings for Flyteconsole | | flyteconsole.serviceMonitor | object | `{"enabled":false,"interval":"60s","labels":{},"scrapeTimeout":"30s"}` | Settings for flyteconsole service monitor | | flyteconsole.serviceMonitor.enabled | bool | `false` | If enabled create the flyteconsole service monitor | | flyteconsole.serviceMonitor.interval | string | `"60s"` | Sets the interval at which metrics will be scraped by prometheus | diff --git a/charts/flyte-core/templates/admin/deployment.yaml b/charts/flyte-core/templates/admin/deployment.yaml index 23ea9966df..cc6877a793 100755 --- a/charts/flyte-core/templates/admin/deployment.yaml +++ b/charts/flyte-core/templates/admin/deployment.yaml @@ -46,7 +46,13 @@ spec: {{- with .Values.flyteadmin.env -}} {{- tpl (toYaml .) $ | nindent 12 }} {{- end }} + {{- end }} + {{- if .Values.flyteadmin.envFrom }} + envFrom: + {{- with .Values.flyteadmin.envFrom -}} + {{- tpl (toYaml .) $ | nindent 12 }} {{- end }} + {{- end }} {{- if .Values.flyteadmin.initialProjects }} - command: - flyteadmin diff --git a/charts/flyte-core/templates/admin/service.yaml b/charts/flyte-core/templates/admin/service.yaml index 9974fcdc4d..26e85c97f2 100644 --- a/charts/flyte-core/templates/admin/service.yaml +++ b/charts/flyte-core/templates/admin/service.yaml @@ -20,22 +20,30 @@ spec: - name: http port: 80 protocol: TCP + {{- if .Values.flyteadmin.service.appProtocols.enabled }} appProtocol: TCP + {{- end }} targetPort: 8088 - name: grpc port: 81 protocol: TCP # intentionally set to TCP instead of grpc + {{- if .Values.flyteadmin.service.appProtocols.enabled }} appProtocol: TCP + {{- end }} targetPort: 8089 - name: redoc protocol: TCP + {{- if .Values.flyteadmin.service.appProtocols.enabled }} appProtocol: TCP + {{- end }} port: 87 targetPort: 8087 - name: http-metrics protocol: TCP + {{- if .Values.flyteadmin.service.appProtocols.enabled }} appProtocol: TCP + {{- end }} port: 10254 {{- with .Values.flyteadmin.service.additionalPorts -}} {{ tpl (toYaml .) 
$ | nindent 4 }} diff --git a/charts/flyte-core/templates/console/service.yaml b/charts/flyte-core/templates/console/service.yaml index 7760cb6fcc..756fa0c7c9 100644 --- a/charts/flyte-core/templates/console/service.yaml +++ b/charts/flyte-core/templates/console/service.yaml @@ -16,7 +16,9 @@ spec: - name: http port: 80 protocol: TCP + {{- if .Values.flyteconsole.service.appProtocols.enabled }} appProtocol: TCP + {{- end }} targetPort: 8080 {{- if .Values.flyteconsole.serviceMonitor.enabled }} - name: http-metrics diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index bbcc39d57a..549e0da249 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -31,6 +31,8 @@ flyteadmin: # name: sendgrid-secret # key: api_key env: [] + # -- Additional flyteadmin environment variables from a reference (ie: Secret or ConfigMap) + envFrom: [] # -- Default resources requests and limits for Flyteadmin deployment resources: limits: @@ -50,6 +52,8 @@ flyteadmin: - flyteexamples # -- Service settings for Flyteadmin service: + appProtocols: + enabled: false annotations: projectcontour.io/upstream-protocol.h2c: grpc type: ClusterIP @@ -418,6 +422,8 @@ flyteconsole: memory: 50Mi # -- Service settings for Flyteconsole service: + appProtocols: + enabled: false annotations: {} type: ClusterIP # -- Annotations for Flyteconsole pods diff --git a/charts/flyte-sandbox/README.md b/charts/flyte-sandbox/README.md index 7820ed2768..c316199b57 100644 --- a/charts/flyte-sandbox/README.md +++ b/charts/flyte-sandbox/README.md @@ -30,6 +30,8 @@ A Helm chart for the Flyte local sandbox | flyte-binary.configuration.inline.plugins.k8s.default-env-vars[0].FLYTE_AWS_ENDPOINT | string | `"http://{{ printf \"%s-minio\" .Release.Name | trunc 63 | trimSuffix \"-\" }}.{{ .Release.Namespace }}:9000"` | | | flyte-binary.configuration.inline.plugins.k8s.default-env-vars[1].FLYTE_AWS_ACCESS_KEY_ID | string | `"minio"` | | | flyte-binary.configuration.inline.plugins.k8s.default-env-vars[2].FLYTE_AWS_SECRET_ACCESS_KEY | string | `"miniostorage"` | | +| flyte-binary.configuration.inline.plugins.k8s.default-env-vars[3].FLYTE_PLATFORM_URL | string | `"{{ printf \"%s-grpc\" .Release.Name }}.{{ .Release.Namespace }}:8089"` | | +| flyte-binary.configuration.inline.plugins.k8s.default-env-vars[4].FLYTE_PLATFORM_INSECURE | bool | `true` | | | flyte-binary.configuration.inline.storage.signedURL.stowConfigOverride.endpoint | string | `"http://localhost:30002"` | | | flyte-binary.configuration.inline.task_resources.defaults.cpu | string | `"500m"` | | | flyte-binary.configuration.inline.task_resources.defaults.ephemeralStorage | int | `0` | | diff --git a/charts/flyte-sandbox/values.yaml b/charts/flyte-sandbox/values.yaml index 9743bcab33..314c8f8bb2 100644 --- a/charts/flyte-sandbox/values.yaml +++ b/charts/flyte-sandbox/values.yaml @@ -57,6 +57,8 @@ flyte-binary: - FLYTE_AWS_ENDPOINT: http://{{ printf "%s-minio" .Release.Name | trunc 63 | trimSuffix "-" }}.{{ .Release.Namespace }}:9000 - FLYTE_AWS_ACCESS_KEY_ID: minio - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage + - FLYTE_PLATFORM_URL: '{{ printf "%s-grpc" .Release.Name }}.{{ .Release.Namespace }}:8089' + - FLYTE_PLATFORM_INSECURE: True inlineConfigMap: '{{ include "flyte-sandbox.configuration.inlineConfigMap" . }}' clusterResourceTemplates: inlineConfigMap: '{{ include "flyte-sandbox.clusterResourceTemplates.inlineConfigMap" . 
}}' diff --git a/datacatalog/.golangci.yml b/datacatalog/.golangci.yml index 6d13f4a3b6..77107079d0 100644 --- a/datacatalog/.golangci.yml +++ b/datacatalog/.golangci.yml @@ -1,35 +1,25 @@ -# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. -# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: -# -# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst - run: skip-dirs: - pkg/client - linters: disable-all: true enable: - - deadcode - errcheck - - gas + - gosec - gci - goconst - goimports - - golint - gosimple - govet - ineffassign - misspell - nakedret - staticcheck - - structcheck - typecheck - unconvert - unparam - unused - - varcheck - + - protogetter linters-settings: gci: custom-order: true @@ -38,6 +28,8 @@ linters-settings: - default - prefix(github.com/flyteorg) skip-generated: true + goconst: + ignore-tests: true issues: exclude: - copylocks diff --git a/datacatalog/pkg/manager/impl/artifact_data_store.go b/datacatalog/pkg/manager/impl/artifact_data_store.go index 5cbd3cc3e0..fa4a14c903 100644 --- a/datacatalog/pkg/manager/impl/artifact_data_store.go +++ b/datacatalog/pkg/manager/impl/artifact_data_store.go @@ -27,8 +27,8 @@ type artifactDataStore struct { } func (m *artifactDataStore) getDataLocation(ctx context.Context, artifact *datacatalog.Artifact, data *datacatalog.ArtifactData) (storage.DataReference, error) { - dataset := artifact.Dataset - return m.store.ConstructReference(ctx, m.storagePrefix, dataset.Project, dataset.Domain, dataset.Name, dataset.Version, artifact.Id, data.Name, artifactDataFile) + dataset := artifact.GetDataset() + return m.store.ConstructReference(ctx, m.storagePrefix, dataset.GetProject(), dataset.GetDomain(), dataset.GetName(), dataset.GetVersion(), artifact.GetId(), data.GetName(), artifactDataFile) } // Store marshalled data in data.pb under the storage prefix @@ -37,7 +37,7 @@ func (m *artifactDataStore) PutData(ctx context.Context, artifact *datacatalog.A if err != nil { return "", errors.NewDataCatalogErrorf(codes.Internal, "Unable to generate data location %s, err %v", dataLocation.String(), err) } - err = m.store.WriteProtobuf(ctx, dataLocation, storage.Options{}, data.Value) + err = m.store.WriteProtobuf(ctx, dataLocation, storage.Options{}, data.GetValue()) if err != nil { return "", errors.NewDataCatalogErrorf(codes.Internal, "Unable to store artifact data in location %s, err %v", dataLocation.String(), err) } diff --git a/datacatalog/pkg/manager/impl/artifact_manager.go b/datacatalog/pkg/manager/impl/artifact_manager.go index 40f3f40538..f32cb3f31f 100644 --- a/datacatalog/pkg/manager/impl/artifact_manager.go +++ b/datacatalog/pkg/manager/impl/artifact_manager.go @@ -58,7 +58,7 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal timer := m.systemMetrics.createResponseTime.Start(ctx) defer timer.Stop() - artifact := request.Artifact + artifact := request.GetArtifact() err := validators.ValidateArtifact(artifact) if err != nil { logger.Warningf(ctx, "Invalid create artifact request %v, err: %v", request, err) @@ -66,8 +66,8 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal return nil, err } - ctx = contextutils.WithProjectDomain(ctx, artifact.Dataset.Project, artifact.Dataset.Domain) - datasetKey := transformers.FromDatasetID(artifact.Dataset) + ctx = contextutils.WithProjectDomain(ctx, artifact.GetDataset().GetProject(), artifact.GetDataset().GetDomain()) + datasetKey := 
transformers.FromDatasetID(artifact.GetDataset()) // The dataset must exist for the artifact, let's verify that first dataset, err := m.repo.DatasetRepo().Get(ctx, datasetKey) @@ -80,16 +80,16 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal // TODO: when adding a tag, need to verify one tag per partition combo // check that the artifact's partitions are the same partition values of the dataset datasetPartitionKeys := transformers.FromPartitionKeyModel(dataset.PartitionKeys) - err = validators.ValidatePartitions(datasetPartitionKeys, artifact.Partitions) + err = validators.ValidatePartitions(datasetPartitionKeys, artifact.GetPartitions()) if err != nil { - logger.Warnf(ctx, "Invalid artifact partitions %v, err: %+v", artifact.Partitions, err) + logger.Warnf(ctx, "Invalid artifact partitions %v, err: %+v", artifact.GetPartitions(), err) m.systemMetrics.createFailureCounter.Inc(ctx) return nil, err } // create Artifact Data offloaded storage files - artifactDataModels := make([]models.ArtifactData, len(request.Artifact.Data)) - for i, artifactData := range request.Artifact.Data { + artifactDataModels := make([]models.ArtifactData, len(request.GetArtifact().GetData())) + for i, artifactData := range request.GetArtifact().GetData() { dataLocation, err := m.artifactStore.PutData(ctx, artifact, artifactData) if err != nil { logger.Errorf(ctx, "Failed to store artifact data err: %v", err) @@ -97,12 +97,12 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal return nil, err } - artifactDataModels[i].Name = artifactData.Name + artifactDataModels[i].Name = artifactData.GetName() artifactDataModels[i].Location = dataLocation.String() m.systemMetrics.createDataSuccessCounter.Inc(ctx) } - logger.Debugf(ctx, "Stored %v data for artifact %+v", len(artifactDataModels), artifact.Id) + logger.Debugf(ctx, "Stored %v data for artifact %+v", len(artifactDataModels), artifact.GetId()) artifactModel, err := transformers.CreateArtifactModel(request, artifactDataModels, dataset) if err != nil { @@ -114,7 +114,7 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal err = m.repo.ArtifactRepo().Create(ctx, artifactModel) if err != nil { if errors.IsAlreadyExistsError(err) { - logger.Warnf(ctx, "Artifact already exists key: %+v, err %v", artifact.Id, err) + logger.Warnf(ctx, "Artifact already exists key: %+v, err %v", artifact.GetId(), err) m.systemMetrics.alreadyExistsCounter.Inc(ctx) } else { logger.Errorf(ctx, "Failed to create artifact %v, err: %v", artifactDataModels, err) @@ -123,7 +123,7 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal return nil, err } - logger.Debugf(ctx, "Successfully created artifact id: %v", artifact.Id) + logger.Debugf(ctx, "Successfully created artifact id: %v", artifact.GetId()) m.systemMetrics.createSuccessCounter.Inc(ctx) return &datacatalog.CreateArtifactResponse{}, nil @@ -141,7 +141,7 @@ func (m *artifactManager) GetArtifact(ctx context.Context, request *datacatalog. return nil, err } - datasetID := request.Dataset + datasetID := request.GetDataset() artifactModel, err := m.findArtifact(ctx, datasetID, request) if err != nil { @@ -164,7 +164,7 @@ func (m *artifactManager) GetArtifact(ctx context.Context, request *datacatalog. 
} artifact.Data = artifactDataList - logger.Debugf(ctx, "Retrieved artifact dataset %v, id: %v", artifact.Dataset, artifact.Id) + logger.Debugf(ctx, "Retrieved artifact dataset %v, id: %v", artifact.GetDataset(), artifact.GetId()) m.systemMetrics.getSuccessCounter.Inc(ctx) return &datacatalog.GetArtifactResponse{ Artifact: artifact, @@ -249,7 +249,7 @@ func (m *artifactManager) ListArtifacts(ctx context.Context, request *datacatalo } // Verify the dataset exists before listing artifacts - datasetKey := transformers.FromDatasetID(request.Dataset) + datasetKey := transformers.FromDatasetID(request.GetDataset()) dataset, err := m.repo.DatasetRepo().Get(ctx, datasetKey) if err != nil { logger.Warnf(ctx, "Failed to get dataset for listing artifacts %v, err: %v", datasetKey, err) @@ -265,7 +265,7 @@ func (m *artifactManager) ListArtifacts(ctx context.Context, request *datacatalo return nil, err } - err = transformers.ApplyPagination(request.Pagination, &listInput) + err = transformers.ApplyPagination(request.GetPagination(), &listInput) if err != nil { logger.Warningf(ctx, "Invalid pagination options in list artifact request %v, err: %v", request, err) m.systemMetrics.validationErrorCounter.Inc(ctx) @@ -311,7 +311,7 @@ func (m *artifactManager) ListArtifacts(ctx context.Context, request *datacatalo // stored data will be overwritten in the underlying blob storage, no longer existing data (based on ArtifactData name) // will be deleted. func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatalog.UpdateArtifactRequest) (*datacatalog.UpdateArtifactResponse, error) { - ctx = contextutils.WithProjectDomain(ctx, request.Dataset.Project, request.Dataset.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetDataset().GetProject(), request.GetDataset().GetDomain()) timer := m.systemMetrics.updateResponseTime.Start(ctx) defer timer.Stop() @@ -333,9 +333,9 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal } // artifactModel needs to be updated with new SerializedMetadata - serializedMetadata, err := transformers.SerializedMetadata(request.Metadata) + serializedMetadata, err := transformers.SerializedMetadata(request.GetMetadata()) if err != nil { - logger.Errorf(ctx, "Error in transforming Metadata from request %+v, err %v", request.Metadata, err) + logger.Errorf(ctx, "Error in transforming Metadata from request %+v, err %v", request.GetMetadata(), err) m.systemMetrics.transformerErrorCounter.Inc(ctx) m.systemMetrics.updateFailureCounter.Inc(ctx) return nil, err @@ -353,9 +353,9 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal // overwrite existing artifact data and upload new entries, building a map of artifact data names to remove // deleted entries from the blob storage after the upload completed artifactDataNames := make(map[string]struct{}) - artifactDataModels := make([]models.ArtifactData, len(request.Data)) - for i, artifactData := range request.Data { - artifactDataNames[artifactData.Name] = struct{}{} + artifactDataModels := make([]models.ArtifactData, len(request.GetData())) + for i, artifactData := range request.GetData() { + artifactDataNames[artifactData.GetName()] = struct{}{} dataLocation, err := m.artifactStore.PutData(ctx, artifact, artifactData) if err != nil { @@ -365,7 +365,7 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal return nil, err } - artifactDataModels[i].Name = artifactData.Name + artifactDataModels[i].Name = artifactData.GetName() 
artifactDataModels[i].Location = dataLocation.String() m.systemMetrics.updateDataSuccessCounter.Inc(ctx) } @@ -384,7 +384,7 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal err = m.repo.ArtifactRepo().Update(ctx, artifactModel) if err != nil { if errors.IsDoesNotExistError(err) { - logger.Warnf(ctx, "Artifact does not exist key: %+v, err %v", artifact.Id, err) + logger.Warnf(ctx, "Artifact does not exist key: %+v, err %v", artifact.GetId(), err) m.systemMetrics.doesNotExistCounter.Inc(ctx) } else { logger.Errorf(ctx, "Failed to update artifact %v, err: %v", artifactModel, err) @@ -408,11 +408,11 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal m.systemMetrics.deleteDataSuccessCounter.Inc(ctx) } - logger.Debugf(ctx, "Successfully updated artifact id: %v", artifact.Id) + logger.Debugf(ctx, "Successfully updated artifact id: %v", artifact.GetId()) m.systemMetrics.updateSuccessCounter.Inc(ctx) return &datacatalog.UpdateArtifactResponse{ - ArtifactId: artifact.Id, + ArtifactId: artifact.GetId(), }, nil } diff --git a/datacatalog/pkg/manager/impl/artifact_manager_test.go b/datacatalog/pkg/manager/impl/artifact_manager_test.go index 2bf39b04d9..0df125f1ec 100644 --- a/datacatalog/pkg/manager/impl/artifact_manager_test.go +++ b/datacatalog/pkg/manager/impl/artifact_manager_test.go @@ -107,50 +107,50 @@ func newMockDataCatalogRepo() *mocks.DataCatalogRepo { } func getExpectedDatastoreLocation(ctx context.Context, store *storage.DataStore, prefix storage.DataReference, artifact *datacatalog.Artifact, idx int) (storage.DataReference, error) { - return getExpectedDatastoreLocationFromName(ctx, store, prefix, artifact, artifact.Data[idx].Name) + return getExpectedDatastoreLocationFromName(ctx, store, prefix, artifact, artifact.GetData()[idx].GetName()) } func getExpectedDatastoreLocationFromName(ctx context.Context, store *storage.DataStore, prefix storage.DataReference, artifact *datacatalog.Artifact, artifactDataName string) (storage.DataReference, error) { - dataset := artifact.Dataset - return store.ConstructReference(ctx, prefix, dataset.Project, dataset.Domain, dataset.Name, dataset.Version, artifact.Id, artifactDataName, artifactDataFile) + dataset := artifact.GetDataset() + return store.ConstructReference(ctx, prefix, dataset.GetProject(), dataset.GetDomain(), dataset.GetName(), dataset.GetVersion(), artifact.GetId(), artifactDataName, artifactDataFile) } func getExpectedArtifactModel(ctx context.Context, t *testing.T, datastore *storage.DataStore, artifact *datacatalog.Artifact) models.Artifact { - expectedDataset := artifact.Dataset + expectedDataset := artifact.GetDataset() - artifactData := make([]models.ArtifactData, len(artifact.Data)) + artifactData := make([]models.ArtifactData, len(artifact.GetData())) // Write sample artifact data to the expected location and see if the retrieved data matches - for i := range artifact.Data { + for i := range artifact.GetData() { testStoragePrefix, err := datastore.ConstructReference(ctx, datastore.GetBaseContainerFQN(ctx), "test") assert.NoError(t, err) dataLocation, err := getExpectedDatastoreLocation(ctx, datastore, testStoragePrefix, artifact, i) assert.NoError(t, err) - err = datastore.WriteProtobuf(ctx, dataLocation, storage.Options{}, artifact.Data[i].Value) + err = datastore.WriteProtobuf(ctx, dataLocation, storage.Options{}, artifact.GetData()[i].GetValue()) assert.NoError(t, err) - artifactData[i].Name = artifact.Data[i].Name + artifactData[i].Name = 
artifact.GetData()[i].GetName() artifactData[i].Location = dataLocation.String() } // construct the artifact model we will return on the queries - serializedMetadata, err := proto.Marshal(artifact.Metadata) + serializedMetadata, err := proto.Marshal(artifact.GetMetadata()) assert.NoError(t, err) datasetKey := models.DatasetKey{ - Project: expectedDataset.Project, - Domain: expectedDataset.Domain, - Version: expectedDataset.Version, - Name: expectedDataset.Name, - UUID: expectedDataset.UUID, + Project: expectedDataset.GetProject(), + Domain: expectedDataset.GetDomain(), + Version: expectedDataset.GetVersion(), + Name: expectedDataset.GetName(), + UUID: expectedDataset.GetUUID(), } return models.Artifact{ ArtifactKey: models.ArtifactKey{ - DatasetProject: expectedDataset.Project, - DatasetDomain: expectedDataset.Domain, - DatasetVersion: expectedDataset.Version, - DatasetName: expectedDataset.Name, - ArtifactID: artifact.Id, + DatasetProject: expectedDataset.GetProject(), + DatasetDomain: expectedDataset.GetDomain(), + DatasetVersion: expectedDataset.GetVersion(), + DatasetName: expectedDataset.GetName(), + ArtifactID: artifact.GetId(), }, - DatasetUUID: expectedDataset.UUID, + DatasetUUID: expectedDataset.GetUUID(), ArtifactData: artifactData, Dataset: models.Dataset{ DatasetKey: datasetKey, @@ -162,7 +162,7 @@ func getExpectedArtifactModel(ctx context.Context, t *testing.T, datastore *stor {Key: "key2", Value: "value2"}, }, Tags: []models.Tag{ - {TagKey: models.TagKey{TagName: "test-tag"}, DatasetUUID: expectedDataset.UUID, ArtifactID: artifact.Id}, + {TagKey: models.TagKey{TagName: "test-tag"}, DatasetUUID: expectedDataset.GetUUID(), ArtifactID: artifact.GetId()}, }, BaseModel: models.BaseModel{ CreatedAt: getTestTimestamp(), @@ -180,15 +180,15 @@ func TestCreateArtifact(t *testing.T) { expectedDataset := getTestDataset() mockDatasetModel := models.Dataset{ DatasetKey: models.DatasetKey{ - Project: expectedDataset.Id.Project, - Domain: expectedDataset.Id.Domain, - Name: expectedDataset.Id.Name, - Version: expectedDataset.Id.Version, - UUID: expectedDataset.Id.UUID, + Project: expectedDataset.GetId().GetProject(), + Domain: expectedDataset.GetId().GetDomain(), + Name: expectedDataset.GetId().GetName(), + Version: expectedDataset.GetId().GetVersion(), + UUID: expectedDataset.GetId().GetUUID(), }, PartitionKeys: []models.PartitionKey{ - {Name: expectedDataset.PartitionKeys[0]}, - {Name: expectedDataset.PartitionKeys[1]}, + {Name: expectedDataset.GetPartitionKeys()[0]}, + {Name: expectedDataset.GetPartitionKeys()[1]}, }, } @@ -200,30 +200,30 @@ func TestCreateArtifact(t *testing.T) { dcRepo := newMockDataCatalogRepo() dcRepo.MockDatasetRepo.On("Get", mock.Anything, mock.MatchedBy(func(dataset models.DatasetKey) bool { - return dataset.Project == expectedDataset.Id.Project && - dataset.Domain == expectedDataset.Id.Domain && - dataset.Name == expectedDataset.Id.Name && - dataset.Version == expectedDataset.Id.Version + return dataset.Project == expectedDataset.GetId().GetProject() && + dataset.Domain == expectedDataset.GetId().GetDomain() && + dataset.Name == expectedDataset.GetId().GetName() && + dataset.Version == expectedDataset.GetId().GetVersion() })).Return(mockDatasetModel, nil) dcRepo.MockArtifactRepo.On("Create", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(artifact models.Artifact) bool { expectedArtifact := getTestArtifact() - return artifact.ArtifactID == expectedArtifact.Id && + return artifact.ArtifactID == expectedArtifact.GetId() && 
artifact.SerializedMetadata != nil && - len(artifact.ArtifactData) == len(expectedArtifact.Data) && - artifact.ArtifactKey.DatasetProject == expectedArtifact.Dataset.Project && - artifact.ArtifactKey.DatasetDomain == expectedArtifact.Dataset.Domain && - artifact.ArtifactKey.DatasetName == expectedArtifact.Dataset.Name && - artifact.ArtifactKey.DatasetVersion == expectedArtifact.Dataset.Version && - artifact.DatasetUUID == expectedArtifact.Dataset.UUID && - artifact.Partitions[0].Key == expectedArtifact.Partitions[0].Key && - artifact.Partitions[0].Value == expectedArtifact.Partitions[0].Value && - artifact.Partitions[0].DatasetUUID == expectedDataset.Id.UUID && - artifact.Partitions[1].Key == expectedArtifact.Partitions[1].Key && - artifact.Partitions[1].Value == expectedArtifact.Partitions[1].Value && - artifact.Partitions[1].DatasetUUID == expectedDataset.Id.UUID + len(artifact.ArtifactData) == len(expectedArtifact.GetData()) && + artifact.ArtifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() && + artifact.ArtifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() && + artifact.ArtifactKey.DatasetName == expectedArtifact.GetDataset().GetName() && + artifact.ArtifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() && + artifact.DatasetUUID == expectedArtifact.GetDataset().GetUUID() && + artifact.Partitions[0].Key == expectedArtifact.GetPartitions()[0].GetKey() && + artifact.Partitions[0].Value == expectedArtifact.GetPartitions()[0].GetValue() && + artifact.Partitions[0].DatasetUUID == expectedDataset.GetId().GetUUID() && + artifact.Partitions[1].Key == expectedArtifact.GetPartitions()[1].GetKey() && + artifact.Partitions[1].Value == expectedArtifact.GetPartitions()[1].GetValue() && + artifact.Partitions[1].DatasetUUID == expectedDataset.GetId().GetUUID() })).Return(nil) request := &datacatalog.CreateArtifactRequest{Artifact: getTestArtifact()} @@ -238,7 +238,7 @@ func TestCreateArtifact(t *testing.T) { var value core.Literal err = datastore.ReadProtobuf(ctx, dataRef, &value) assert.NoError(t, err) - assert.True(t, proto.Equal(&value, getTestArtifact().Data[0].Value)) + assert.True(t, proto.Equal(&value, getTestArtifact().GetData()[0].GetValue())) }) t.Run("Dataset does not exist", func(t *testing.T) { @@ -258,7 +258,7 @@ func TestCreateArtifact(t *testing.T) { request := &datacatalog.CreateArtifactRequest{ Artifact: &datacatalog.Artifact{ // missing artifact id - Dataset: getTestDataset().Id, + Dataset: getTestDataset().GetId(), }, } @@ -273,7 +273,7 @@ func TestCreateArtifact(t *testing.T) { request := &datacatalog.CreateArtifactRequest{ Artifact: &datacatalog.Artifact{ Id: "test", - Dataset: getTestDataset().Id, + Dataset: getTestDataset().GetId(), // missing artifactData }, } @@ -294,13 +294,13 @@ func TestCreateArtifact(t *testing.T) { mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(artifact models.Artifact) bool { expectedArtifact := getTestArtifact() - return artifact.ArtifactID == expectedArtifact.Id && + return artifact.ArtifactID == expectedArtifact.GetId() && artifact.SerializedMetadata != nil && - len(artifact.ArtifactData) == len(expectedArtifact.Data) && - artifact.ArtifactKey.DatasetProject == expectedArtifact.Dataset.Project && - artifact.ArtifactKey.DatasetDomain == expectedArtifact.Dataset.Domain && - artifact.ArtifactKey.DatasetName == expectedArtifact.Dataset.Name && - artifact.ArtifactKey.DatasetVersion == expectedArtifact.Dataset.Version + len(artifact.ArtifactData) == 
len(expectedArtifact.GetData()) && + artifact.ArtifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() && + artifact.ArtifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() && + artifact.ArtifactKey.DatasetName == expectedArtifact.GetDataset().GetName() && + artifact.ArtifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() })).Return(status.Error(codes.AlreadyExists, "test already exists")) request := &datacatalog.CreateArtifactRequest{Artifact: getTestArtifact()} @@ -338,10 +338,10 @@ func TestCreateArtifact(t *testing.T) { dcRepo := newMockDataCatalogRepo() mockDatasetModel := models.Dataset{ DatasetKey: models.DatasetKey{ - Project: expectedDataset.Id.Project, - Domain: expectedDataset.Id.Domain, - Name: expectedDataset.Id.Name, - Version: expectedDataset.Id.Version, + Project: expectedDataset.GetId().GetProject(), + Domain: expectedDataset.GetId().GetDomain(), + Name: expectedDataset.GetId().GetName(), + Version: expectedDataset.GetId().GetVersion(), }, } dcRepo.MockDatasetRepo.On("Get", mock.Anything, mock.Anything).Return(mockDatasetModel, nil) @@ -392,21 +392,21 @@ func TestGetArtifact(t *testing.T) { t.Run("Get by Id", func(t *testing.T) { dcRepo.MockArtifactRepo.On("Get", mock.Anything, mock.MatchedBy(func(artifactKey models.ArtifactKey) bool { - return artifactKey.ArtifactID == expectedArtifact.Id && - artifactKey.DatasetProject == expectedArtifact.Dataset.Project && - artifactKey.DatasetDomain == expectedArtifact.Dataset.Domain && - artifactKey.DatasetVersion == expectedArtifact.Dataset.Version && - artifactKey.DatasetName == expectedArtifact.Dataset.Name + return artifactKey.ArtifactID == expectedArtifact.GetId() && + artifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() && + artifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() && + artifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() && + artifactKey.DatasetName == expectedArtifact.GetDataset().GetName() })).Return(mockArtifactModel, nil) artifactManager := NewArtifactManager(dcRepo, datastore, testStoragePrefix, mockScope.NewTestScope()) artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{ - Dataset: getTestDataset().Id, - QueryHandle: &datacatalog.GetArtifactRequest_ArtifactId{ArtifactId: expectedArtifact.Id}, + Dataset: getTestDataset().GetId(), + QueryHandle: &datacatalog.GetArtifactRequest_ArtifactId{ArtifactId: expectedArtifact.GetId()}, }) assert.NoError(t, err) - assert.True(t, proto.Equal(expectedArtifact, artifactResponse.Artifact)) + assert.True(t, proto.Equal(expectedArtifact, artifactResponse.GetArtifact())) }) t.Run("Get by Artifact Tag", func(t *testing.T) { @@ -434,16 +434,16 @@ func TestGetArtifact(t *testing.T) { artifactManager := NewArtifactManager(dcRepo, datastore, testStoragePrefix, mockScope.NewTestScope()) artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{ - Dataset: getTestDataset().Id, + Dataset: getTestDataset().GetId(), QueryHandle: &datacatalog.GetArtifactRequest_TagName{TagName: expectedTag.TagName}, }) assert.NoError(t, err) - assert.True(t, proto.Equal(expectedArtifact, artifactResponse.Artifact)) + assert.True(t, proto.Equal(expectedArtifact, artifactResponse.GetArtifact())) }) t.Run("Get missing input", func(t *testing.T) { artifactManager := NewArtifactManager(dcRepo, datastore, testStoragePrefix, mockScope.NewTestScope()) - artifactResponse, err := artifactManager.GetArtifact(ctx, 
&datacatalog.GetArtifactRequest{Dataset: getTestDataset().Id}) + artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{Dataset: getTestDataset().GetId()}) assert.Error(t, err) assert.Nil(t, artifactResponse) responseCode := status.Code(err) @@ -454,7 +454,7 @@ func TestGetArtifact(t *testing.T) { dcRepo.MockTagRepo.On("Get", mock.Anything, mock.Anything).Return( models.Tag{}, errors.NewDataCatalogError(codes.NotFound, "tag with artifact does not exist")) artifactManager := NewArtifactManager(dcRepo, datastore, testStoragePrefix, mockScope.NewTestScope()) - artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{Dataset: getTestDataset().Id, QueryHandle: &datacatalog.GetArtifactRequest_TagName{TagName: "test"}}) + artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{Dataset: getTestDataset().GetId(), QueryHandle: &datacatalog.GetArtifactRequest_TagName{TagName: "test"}}) assert.Error(t, err) assert.Nil(t, artifactResponse) responseCode := status.Code(err) @@ -473,11 +473,11 @@ func TestListArtifact(t *testing.T) { expectedDataset := getTestDataset() mockDatasetModel := models.Dataset{ DatasetKey: models.DatasetKey{ - Project: expectedDataset.Id.Project, - Domain: expectedDataset.Id.Domain, - Name: expectedDataset.Id.Name, - Version: expectedDataset.Id.Version, - UUID: expectedDataset.Id.UUID, + Project: expectedDataset.GetId().GetProject(), + Domain: expectedDataset.GetId().GetDomain(), + Name: expectedDataset.GetId().GetName(), + Version: expectedDataset.GetId().GetVersion(), + UUID: expectedDataset.GetId().GetUUID(), }, } @@ -500,7 +500,7 @@ func TestListArtifact(t *testing.T) { }, } - artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: getTestDataset().Id, Filter: filter}) + artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: getTestDataset().GetId(), Filter: filter}) assert.Error(t, err) assert.Nil(t, artifactResponse) responseCode := status.Code(err) @@ -543,10 +543,10 @@ func TestListArtifact(t *testing.T) { dcRepo.MockDatasetRepo.On("Get", mock.Anything, mock.MatchedBy(func(dataset models.DatasetKey) bool { - return dataset.Project == expectedDataset.Id.Project && - dataset.Domain == expectedDataset.Id.Domain && - dataset.Name == expectedDataset.Id.Name && - dataset.Version == expectedDataset.Id.Version + return dataset.Project == expectedDataset.GetId().GetProject() && + dataset.Domain == expectedDataset.GetId().GetDomain() && + dataset.Name == expectedDataset.GetId().GetName() && + dataset.Version == expectedDataset.GetId().GetVersion() })).Return(mockDatasetModel, nil) mockArtifacts := []models.Artifact{ @@ -556,10 +556,10 @@ func TestListArtifact(t *testing.T) { dcRepo.MockArtifactRepo.On("List", mock.Anything, mock.MatchedBy(func(dataset models.DatasetKey) bool { - return dataset.Project == expectedDataset.Id.Project && - dataset.Domain == expectedDataset.Id.Domain && - dataset.Name == expectedDataset.Id.Name && - dataset.Version == expectedDataset.Id.Version + return dataset.Project == expectedDataset.GetId().GetProject() && + dataset.Domain == expectedDataset.GetId().GetDomain() && + dataset.Name == expectedDataset.GetId().GetName() && + dataset.Version == expectedDataset.GetId().GetVersion() }), mock.MatchedBy(func(listInput models.ListModelsInput) bool { return len(listInput.ModelFilters) == 3 && @@ -573,7 +573,7 @@ func TestListArtifact(t *testing.T) { listInput.Offset 
== 0 })).Return(mockArtifacts, nil) - artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: expectedDataset.Id, Filter: filter}) + artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: expectedDataset.GetId(), Filter: filter}) assert.NoError(t, err) assert.NotEmpty(t, artifactResponse) }) @@ -584,10 +584,10 @@ func TestListArtifact(t *testing.T) { dcRepo.MockDatasetRepo.On("Get", mock.Anything, mock.MatchedBy(func(dataset models.DatasetKey) bool { - return dataset.Project == expectedDataset.Id.Project && - dataset.Domain == expectedDataset.Id.Domain && - dataset.Name == expectedDataset.Id.Name && - dataset.Version == expectedDataset.Id.Version + return dataset.Project == expectedDataset.GetId().GetProject() && + dataset.Domain == expectedDataset.GetId().GetDomain() && + dataset.Name == expectedDataset.GetId().GetName() && + dataset.Version == expectedDataset.GetId().GetVersion() })).Return(mockDatasetModel, nil) mockArtifacts := []models.Artifact{ @@ -596,16 +596,16 @@ func TestListArtifact(t *testing.T) { } dcRepo.MockArtifactRepo.On("List", mock.Anything, mock.MatchedBy(func(dataset models.DatasetKey) bool { - return dataset.Project == expectedDataset.Id.Project && - dataset.Domain == expectedDataset.Id.Domain && - dataset.Name == expectedDataset.Id.Name && - dataset.Version == expectedDataset.Id.Version + return dataset.Project == expectedDataset.GetId().GetProject() && + dataset.Domain == expectedDataset.GetId().GetDomain() && + dataset.Name == expectedDataset.GetId().GetName() && + dataset.Version == expectedDataset.GetId().GetVersion() }), mock.MatchedBy(func(listInput models.ListModelsInput) bool { return len(listInput.ModelFilters) == 0 })).Return(mockArtifacts, nil) - artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: expectedDataset.Id, Filter: filter}) + artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: expectedDataset.GetId(), Filter: filter}) assert.NoError(t, err) assert.NotEmpty(t, artifactResponse) }) @@ -634,11 +634,11 @@ func TestUpdateArtifact(t *testing.T) { dcRepo.MockArtifactRepo.On("Get", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(artifactKey models.ArtifactKey) bool { - return artifactKey.ArtifactID == expectedArtifact.Id && - artifactKey.DatasetProject == expectedArtifact.Dataset.Project && - artifactKey.DatasetDomain == expectedArtifact.Dataset.Domain && - artifactKey.DatasetName == expectedArtifact.Dataset.Name && - artifactKey.DatasetVersion == expectedArtifact.Dataset.Version + return artifactKey.ArtifactID == expectedArtifact.GetId() && + artifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() && + artifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() && + artifactKey.DatasetName == expectedArtifact.GetDataset().GetName() && + artifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() })).Return(mockArtifactModel, nil) metaData := &datacatalog.Metadata{ @@ -650,18 +650,18 @@ func TestUpdateArtifact(t *testing.T) { dcRepo.MockArtifactRepo.On("Update", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(artifact models.Artifact) bool { - return artifact.ArtifactID == expectedArtifact.Id && - artifact.ArtifactKey.DatasetProject == expectedArtifact.Dataset.Project && - artifact.ArtifactKey.DatasetDomain == expectedArtifact.Dataset.Domain && - 
artifact.ArtifactKey.DatasetName == expectedArtifact.Dataset.Name && - artifact.ArtifactKey.DatasetVersion == expectedArtifact.Dataset.Version && + return artifact.ArtifactID == expectedArtifact.GetId() && + artifact.ArtifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() && + artifact.ArtifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() && + artifact.ArtifactKey.DatasetName == expectedArtifact.GetDataset().GetName() && + artifact.ArtifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() && reflect.DeepEqual(artifact.SerializedMetadata, serializedMetadata) })).Return(nil) request := &datacatalog.UpdateArtifactRequest{ - Dataset: expectedDataset.Id, + Dataset: expectedDataset.GetId(), QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{ - ArtifactId: expectedArtifact.Id, + ArtifactId: expectedArtifact.GetId(), }, Data: []*datacatalog.ArtifactData{ { @@ -682,7 +682,7 @@ func TestUpdateArtifact(t *testing.T) { artifactResponse, err := artifactManager.UpdateArtifact(ctx, request) assert.NoError(t, err) assert.NotNil(t, artifactResponse) - assert.Equal(t, expectedArtifact.Id, artifactResponse.GetArtifactId()) + assert.Equal(t, expectedArtifact.GetId(), artifactResponse.GetArtifactId()) dcRepo.MockArtifactRepo.AssertExpectations(t) // check that the datastore has the updated artifactData available @@ -724,11 +724,11 @@ func TestUpdateArtifact(t *testing.T) { dcRepo.MockArtifactRepo.On("Update", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(artifact models.Artifact) bool { - return artifact.ArtifactID == expectedArtifact.Id && - artifact.ArtifactKey.DatasetProject == expectedArtifact.Dataset.Project && - artifact.ArtifactKey.DatasetDomain == expectedArtifact.Dataset.Domain && - artifact.ArtifactKey.DatasetName == expectedArtifact.Dataset.Name && - artifact.ArtifactKey.DatasetVersion == expectedArtifact.Dataset.Version && + return artifact.ArtifactID == expectedArtifact.GetId() && + artifact.ArtifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() && + artifact.ArtifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() && + artifact.ArtifactKey.DatasetName == expectedArtifact.GetDataset().GetName() && + artifact.ArtifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() && reflect.DeepEqual(artifact.SerializedMetadata, serializedMetadata) })).Return(nil) @@ -753,7 +753,7 @@ func TestUpdateArtifact(t *testing.T) { }, nil) request := &datacatalog.UpdateArtifactRequest{ - Dataset: expectedDataset.Id, + Dataset: expectedDataset.GetId(), QueryHandle: &datacatalog.UpdateArtifactRequest_TagName{ TagName: expectedTag.TagName, }, @@ -776,7 +776,7 @@ func TestUpdateArtifact(t *testing.T) { artifactResponse, err := artifactManager.UpdateArtifact(ctx, request) assert.NoError(t, err) assert.NotNil(t, artifactResponse) - assert.Equal(t, expectedArtifact.Id, artifactResponse.GetArtifactId()) + assert.Equal(t, expectedArtifact.GetId(), artifactResponse.GetArtifactId()) dcRepo.MockArtifactRepo.AssertExpectations(t) // check that the datastore has the updated artifactData available @@ -809,14 +809,14 @@ func TestUpdateArtifact(t *testing.T) { dcRepo := newMockDataCatalogRepo() dcRepo.MockArtifactRepo.On("Get", mock.Anything, mock.Anything).Return(models.Artifact{}, repoErrors.GetMissingEntityError("Artifact", &datacatalog.Artifact{ - Dataset: expectedDataset.Id, - Id: expectedArtifact.Id, + Dataset: expectedDataset.GetId(), + Id: expectedArtifact.GetId(), })) request := 
&datacatalog.UpdateArtifactRequest{ - Dataset: expectedDataset.Id, + Dataset: expectedDataset.GetId(), QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{ - ArtifactId: expectedArtifact.Id, + ArtifactId: expectedArtifact.GetId(), }, Data: []*datacatalog.ArtifactData{ { @@ -844,7 +844,7 @@ func TestUpdateArtifact(t *testing.T) { dcRepo := newMockDataCatalogRepo() request := &datacatalog.UpdateArtifactRequest{ - Dataset: expectedDataset.Id, + Dataset: expectedDataset.GetId(), QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{}, Data: []*datacatalog.ArtifactData{ { @@ -872,7 +872,7 @@ func TestUpdateArtifact(t *testing.T) { dcRepo := newMockDataCatalogRepo() request := &datacatalog.UpdateArtifactRequest{ - Dataset: expectedDataset.Id, + Dataset: expectedDataset.GetId(), QueryHandle: &datacatalog.UpdateArtifactRequest_TagName{}, Data: []*datacatalog.ArtifactData{ { @@ -900,9 +900,9 @@ func TestUpdateArtifact(t *testing.T) { dcRepo := newMockDataCatalogRepo() request := &datacatalog.UpdateArtifactRequest{ - Dataset: expectedDataset.Id, + Dataset: expectedDataset.GetId(), QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{ - ArtifactId: expectedArtifact.Id, + ArtifactId: expectedArtifact.GetId(), }, Data: nil, } @@ -921,9 +921,9 @@ func TestUpdateArtifact(t *testing.T) { dcRepo := newMockDataCatalogRepo() request := &datacatalog.UpdateArtifactRequest{ - Dataset: expectedDataset.Id, + Dataset: expectedDataset.GetId(), QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{ - ArtifactId: expectedArtifact.Id, + ArtifactId: expectedArtifact.GetId(), }, Data: []*datacatalog.ArtifactData{}, } diff --git a/datacatalog/pkg/manager/impl/dataset_manager.go b/datacatalog/pkg/manager/impl/dataset_manager.go index 0db84d6360..8caca3f3a3 100644 --- a/datacatalog/pkg/manager/impl/dataset_manager.go +++ b/datacatalog/pkg/manager/impl/dataset_manager.go @@ -44,12 +44,12 @@ type datasetManager struct { func (dm *datasetManager) validateCreateRequest(request *datacatalog.CreateDatasetRequest) error { errorSet := make([]error, 0) - err := validators.ValidateDatasetID(request.Dataset.Id) + err := validators.ValidateDatasetID(request.GetDataset().GetId()) if err != nil { errorSet = append(errorSet, err) } - err = validators.ValidateUniquePartitionKeys(request.Dataset.PartitionKeys) + err = validators.ValidateUniquePartitionKeys(request.GetDataset().GetPartitionKeys()) if err != nil { errorSet = append(errorSet, err) } @@ -71,7 +71,7 @@ func (dm *datasetManager) CreateDataset(ctx context.Context, request *datacatalo return nil, err } - datasetModel, err := transformers.CreateDatasetModel(request.Dataset) + datasetModel, err := transformers.CreateDatasetModel(request.GetDataset()) if err != nil { logger.Errorf(ctx, "Unable to transform create dataset request %+v err: %v", request, err) dm.systemMetrics.transformerErrorCounter.Inc(ctx) @@ -81,7 +81,7 @@ func (dm *datasetManager) CreateDataset(ctx context.Context, request *datacatalo err = dm.repo.DatasetRepo().Create(ctx, *datasetModel) if err != nil { if errors.IsAlreadyExistsError(err) { - logger.Warnf(ctx, "Dataset already exists key: %+v, err %v", request.Dataset, err) + logger.Warnf(ctx, "Dataset already exists key: %+v, err %v", request.GetDataset(), err) dm.systemMetrics.alreadyExistsCounter.Inc(ctx) } else { logger.Errorf(ctx, "Failed to create dataset model: %+v err: %v", datasetModel, err) @@ -90,7 +90,7 @@ func (dm *datasetManager) CreateDataset(ctx context.Context, request *datacatalo return nil, err } - logger.Debugf(ctx, 
"Successfully created dataset %+v", request.Dataset) + logger.Debugf(ctx, "Successfully created dataset %+v", request.GetDataset()) dm.systemMetrics.createSuccessCounter.Inc(ctx) return &datacatalog.CreateDatasetResponse{}, nil } @@ -100,14 +100,14 @@ func (dm *datasetManager) GetDataset(ctx context.Context, request *datacatalog.G timer := dm.systemMetrics.getResponseTime.Start(ctx) defer timer.Stop() - err := validators.ValidateDatasetID(request.Dataset) + err := validators.ValidateDatasetID(request.GetDataset()) if err != nil { logger.Warnf(ctx, "Invalid get dataset request %+v err: %v", request, err) dm.systemMetrics.validationErrorCounter.Inc(ctx) return nil, err } - datasetKey := transformers.FromDatasetID(request.Dataset) + datasetKey := transformers.FromDatasetID(request.GetDataset()) datasetModel, err := dm.repo.DatasetRepo().Get(ctx, datasetKey) if err != nil { @@ -150,7 +150,7 @@ func (dm *datasetManager) ListDatasets(ctx context.Context, request *datacatalog return nil, err } - err = transformers.ApplyPagination(request.Pagination, &listInput) + err = transformers.ApplyPagination(request.GetPagination(), &listInput) if err != nil { logger.Warningf(ctx, "Invalid pagination options in list datasets request %v, err: %v", request, err) dm.systemMetrics.validationErrorCounter.Inc(ctx) @@ -171,7 +171,7 @@ func (dm *datasetManager) ListDatasets(ctx context.Context, request *datacatalog for idx, datasetModel := range datasetModels { dataset, err := transformers.FromDatasetModel(datasetModel) if err != nil { - logger.Errorf(ctx, "Unable to transform Dataset %+v err: %v", dataset.Id, err) + logger.Errorf(ctx, "Unable to transform Dataset %+v err: %v", dataset.GetId(), err) transformerErrs = append(transformerErrs, err) } diff --git a/datacatalog/pkg/manager/impl/dataset_manager_test.go b/datacatalog/pkg/manager/impl/dataset_manager_test.go index 2ebd107304..9d668fdef1 100644 --- a/datacatalog/pkg/manager/impl/dataset_manager_test.go +++ b/datacatalog/pkg/manager/impl/dataset_manager_test.go @@ -58,13 +58,13 @@ func TestCreateDataset(t *testing.T) { mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(dataset models.Dataset) bool { - return dataset.Name == expectedDataset.Id.Name && - dataset.Project == expectedDataset.Id.Project && - dataset.Domain == expectedDataset.Id.Domain && - dataset.Version == expectedDataset.Id.Version && - len(dataset.PartitionKeys) == len(expectedDataset.PartitionKeys) && - dataset.PartitionKeys[0].Name == expectedDataset.PartitionKeys[0] && - dataset.PartitionKeys[1].Name == expectedDataset.PartitionKeys[1] + return dataset.Name == expectedDataset.GetId().GetName() && + dataset.Project == expectedDataset.GetId().GetProject() && + dataset.Domain == expectedDataset.GetId().GetDomain() && + dataset.Version == expectedDataset.GetId().GetVersion() && + len(dataset.PartitionKeys) == len(expectedDataset.GetPartitionKeys()) && + dataset.PartitionKeys[0].Name == expectedDataset.GetPartitionKeys()[0] && + dataset.PartitionKeys[1].Name == expectedDataset.GetPartitionKeys()[1] })).Return(nil) request := &datacatalog.CreateDatasetRequest{Dataset: expectedDataset} datasetResponse, err := datasetManager.CreateDataset(context.Background(), request) @@ -79,10 +79,10 @@ func TestCreateDataset(t *testing.T) { mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(dataset models.Dataset) bool { - return dataset.Name == expectedDataset.Id.Name && - dataset.Project == expectedDataset.Id.Project && - dataset.Domain == 
expectedDataset.Id.Domain && - dataset.Version == expectedDataset.Id.Version && + return dataset.Name == expectedDataset.GetId().GetName() && + dataset.Project == expectedDataset.GetId().GetProject() && + dataset.Domain == expectedDataset.GetId().GetDomain() && + dataset.Version == expectedDataset.GetId().GetVersion() && len(dataset.PartitionKeys) == 0 })).Return(nil) @@ -132,7 +132,7 @@ func TestCreateDataset(t *testing.T) { t.Run("DuplicatePartition", func(t *testing.T) { dcRepo := getDataCatalogRepo() badDataset := getTestDataset() - badDataset.PartitionKeys = append(badDataset.PartitionKeys, badDataset.PartitionKeys[0]) + badDataset.PartitionKeys = append(badDataset.PartitionKeys, badDataset.GetPartitionKeys()[0]) datasetManager := NewDatasetManager(dcRepo, nil, mockScope.NewTestScope()) dcRepo.MockDatasetRepo.On("Create", @@ -162,17 +162,17 @@ func TestGetDataset(t *testing.T) { mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(datasetKey models.DatasetKey) bool { - return datasetKey.Name == expectedDataset.Id.Name && - datasetKey.Project == expectedDataset.Id.Project && - datasetKey.Domain == expectedDataset.Id.Domain && - datasetKey.Version == expectedDataset.Id.Version + return datasetKey.Name == expectedDataset.GetId().GetName() && + datasetKey.Project == expectedDataset.GetId().GetProject() && + datasetKey.Domain == expectedDataset.GetId().GetDomain() && + datasetKey.Version == expectedDataset.GetId().GetVersion() })).Return(*datasetModelResponse, nil) - request := &datacatalog.GetDatasetRequest{Dataset: getTestDataset().Id} + request := &datacatalog.GetDatasetRequest{Dataset: getTestDataset().GetId()} datasetResponse, err := datasetManager.GetDataset(context.Background(), request) assert.NoError(t, err) assert.NotNil(t, datasetResponse) - assert.True(t, proto.Equal(datasetResponse.Dataset, expectedDataset)) - assert.EqualValues(t, datasetResponse.Dataset.Metadata.KeyMap, expectedDataset.Metadata.KeyMap) + assert.True(t, proto.Equal(datasetResponse.GetDataset(), expectedDataset)) + assert.EqualValues(t, datasetResponse.GetDataset().GetMetadata().GetKeyMap(), expectedDataset.GetMetadata().GetKeyMap()) }) t.Run("Does not exist", func(t *testing.T) { @@ -183,12 +183,12 @@ func TestGetDataset(t *testing.T) { mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(datasetKey models.DatasetKey) bool { - return datasetKey.Name == expectedDataset.Id.Name && - datasetKey.Project == expectedDataset.Id.Project && - datasetKey.Domain == expectedDataset.Id.Domain && - datasetKey.Version == expectedDataset.Id.Version + return datasetKey.Name == expectedDataset.GetId().GetName() && + datasetKey.Project == expectedDataset.GetId().GetProject() && + datasetKey.Domain == expectedDataset.GetId().GetDomain() && + datasetKey.Version == expectedDataset.GetId().GetVersion() })).Return(models.Dataset{}, errors.NewDataCatalogError(codes.NotFound, "dataset does not exist")) - request := &datacatalog.GetDatasetRequest{Dataset: getTestDataset().Id} + request := &datacatalog.GetDatasetRequest{Dataset: getTestDataset().GetId()} _, err := datasetManager.GetDataset(context.Background(), request) assert.Error(t, err) responseCode := status.Code(err) @@ -267,7 +267,7 @@ func TestListDatasets(t *testing.T) { datasetResponse, err := datasetManager.ListDatasets(ctx, &datacatalog.ListDatasetsRequest{Filter: filter}) assert.NoError(t, err) assert.NotEmpty(t, datasetResponse) - assert.Len(t, datasetResponse.Datasets, 1) + assert.Len(t, 
datasetResponse.GetDatasets(), 1) }) t.Run("List Datasets with no filtering", func(t *testing.T) { @@ -286,6 +286,6 @@ func TestListDatasets(t *testing.T) { datasetResponse, err := datasetManager.ListDatasets(ctx, &datacatalog.ListDatasetsRequest{}) assert.NoError(t, err) assert.NotEmpty(t, datasetResponse) - assert.Len(t, datasetResponse.Datasets, 1) + assert.Len(t, datasetResponse.GetDatasets(), 1) }) } diff --git a/datacatalog/pkg/manager/impl/reservation_manager.go b/datacatalog/pkg/manager/impl/reservation_manager.go index 394ad5a55d..62dbb25668 100644 --- a/datacatalog/pkg/manager/impl/reservation_manager.go +++ b/datacatalog/pkg/manager/impl/reservation_manager.go @@ -88,7 +88,7 @@ func NewReservationManager( // Attempt to acquire a reservation for the specified artifact. If there is not active reservation, successfully // acquire it. If you are the owner of the active reservation, extend it. If another owner, return the existing reservation. func (r *reservationManager) GetOrExtendReservation(ctx context.Context, request *datacatalog.GetOrExtendReservationRequest) (*datacatalog.GetOrExtendReservationResponse, error) { - reservationID := request.ReservationId + reservationID := request.GetReservationId() // Use minimum of maxHeartbeatInterval and requested heartbeat interval heartbeatInterval := r.maxHeartbeatInterval @@ -97,7 +97,7 @@ func (r *reservationManager) GetOrExtendReservation(ctx context.Context, request heartbeatInterval = requestHeartbeatInterval.AsDuration() } - reservation, err := r.tryAcquireReservation(ctx, reservationID, request.OwnerId, heartbeatInterval) + reservation, err := r.tryAcquireReservation(ctx, reservationID, request.GetOwnerId(), heartbeatInterval) if err != nil { r.systemMetrics.acquireReservationFailure.Inc(ctx) return nil, err @@ -189,12 +189,12 @@ func (r *reservationManager) tryAcquireReservation(ctx context.Context, reservat // Release an active reservation with the specified owner. If one does not exist, gracefully return. 
func (r *reservationManager) ReleaseReservation(ctx context.Context, request *datacatalog.ReleaseReservationRequest) (*datacatalog.ReleaseReservationResponse, error) { repo := r.repo.ReservationRepo() - reservationKey := transformers.FromReservationID(request.ReservationId) + reservationKey := transformers.FromReservationID(request.GetReservationId()) - err := repo.Delete(ctx, reservationKey, request.OwnerId) + err := repo.Delete(ctx, reservationKey, request.GetOwnerId()) if err != nil { if errors.IsDoesNotExistError(err) { - logger.Warnf(ctx, "Reservation does not exist id: %+v, err %v", request.ReservationId, err) + logger.Warnf(ctx, "Reservation does not exist id: %+v, err %v", request.GetReservationId(), err) r.systemMetrics.reservationDoesNotExist.Inc(ctx) return &datacatalog.ReleaseReservationResponse{}, nil } diff --git a/datacatalog/pkg/manager/impl/reservation_manager_test.go b/datacatalog/pkg/manager/impl/reservation_manager_test.go index 0dd7408792..1281b7df89 100644 --- a/datacatalog/pkg/manager/impl/reservation_manager_test.go +++ b/datacatalog/pkg/manager/impl/reservation_manager_test.go @@ -50,10 +50,10 @@ func TestGetOrExtendReservation_CreateReservation(t *testing.T) { dcRepo.MockReservationRepo.On("Get", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(key models.ReservationKey) bool { - return key.DatasetProject == datasetID.Project && - key.DatasetDomain == datasetID.Domain && - key.DatasetVersion == datasetID.Version && - key.DatasetName == datasetID.Name && + return key.DatasetProject == datasetID.GetProject() && + key.DatasetDomain == datasetID.GetDomain() && + key.DatasetVersion == datasetID.GetVersion() && + key.DatasetName == datasetID.GetName() && key.TagName == tagName })).Return(models.Reservation{}, errors2.NewDataCatalogErrorf(codes.NotFound, "entry not found")) @@ -62,10 +62,10 @@ func TestGetOrExtendReservation_CreateReservation(t *testing.T) { dcRepo.MockReservationRepo.On("Create", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(reservation models.Reservation) bool { - return reservation.DatasetProject == datasetID.Project && - reservation.DatasetDomain == datasetID.Domain && - reservation.DatasetName == datasetID.Name && - reservation.DatasetVersion == datasetID.Version && + return reservation.DatasetProject == datasetID.GetProject() && + reservation.DatasetDomain == datasetID.GetDomain() && + reservation.DatasetName == datasetID.GetName() && + reservation.DatasetVersion == datasetID.GetVersion() && reservation.TagName == tagName && reservation.OwnerID == currentOwner && reservation.ExpiresAt == now.Add(heartbeatInterval*heartbeatGracePeriodMultiplier) @@ -86,8 +86,8 @@ func TestGetOrExtendReservation_CreateReservation(t *testing.T) { resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req) assert.Nil(t, err) - assert.Equal(t, currentOwner, resp.GetReservation().OwnerId) - assert.Equal(t, heartbeatIntervalPb, resp.GetReservation().HeartbeatInterval) + assert.Equal(t, currentOwner, resp.GetReservation().GetOwnerId()) + assert.Equal(t, heartbeatIntervalPb, resp.GetReservation().GetHeartbeatInterval()) } func TestGetOrExtendReservation_MaxHeartbeatInterval(t *testing.T) { @@ -98,10 +98,10 @@ func TestGetOrExtendReservation_MaxHeartbeatInterval(t *testing.T) { dcRepo.MockReservationRepo.On("Get", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(key models.ReservationKey) bool { - return key.DatasetProject == datasetID.Project && 
- key.DatasetDomain == datasetID.Domain && - key.DatasetVersion == datasetID.Version && - key.DatasetName == datasetID.Name && + return key.DatasetProject == datasetID.GetProject() && + key.DatasetDomain == datasetID.GetDomain() && + key.DatasetVersion == datasetID.GetVersion() && + key.DatasetName == datasetID.GetName() && key.TagName == tagName })).Return(models.Reservation{}, errors2.NewDataCatalogErrorf(codes.NotFound, "entry not found")) @@ -110,10 +110,10 @@ func TestGetOrExtendReservation_MaxHeartbeatInterval(t *testing.T) { dcRepo.MockReservationRepo.On("Create", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(reservation models.Reservation) bool { - return reservation.DatasetProject == datasetID.Project && - reservation.DatasetDomain == datasetID.Domain && - reservation.DatasetName == datasetID.Name && - reservation.DatasetVersion == datasetID.Version && + return reservation.DatasetProject == datasetID.GetProject() && + reservation.DatasetDomain == datasetID.GetDomain() && + reservation.DatasetName == datasetID.GetName() && + reservation.DatasetVersion == datasetID.GetVersion() && reservation.TagName == tagName && reservation.OwnerID == currentOwner && reservation.ExpiresAt == now.Add(heartbeatInterval*heartbeatGracePeriodMultiplier) @@ -134,8 +134,8 @@ func TestGetOrExtendReservation_MaxHeartbeatInterval(t *testing.T) { resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req) assert.Nil(t, err) - assert.Equal(t, currentOwner, resp.GetReservation().OwnerId) - assert.Equal(t, heartbeatIntervalPb, resp.GetReservation().HeartbeatInterval) + assert.Equal(t, currentOwner, resp.GetReservation().GetOwnerId()) + assert.Equal(t, heartbeatIntervalPb, resp.GetReservation().GetHeartbeatInterval()) } func TestGetOrExtendReservation_ExtendReservation(t *testing.T) { @@ -151,10 +151,10 @@ func TestGetOrExtendReservation_ExtendReservation(t *testing.T) { dcRepo.MockReservationRepo.On("Update", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(reservation models.Reservation) bool { - return reservation.DatasetProject == datasetID.Project && - reservation.DatasetDomain == datasetID.Domain && - reservation.DatasetName == datasetID.Name && - reservation.DatasetVersion == datasetID.Version && + return reservation.DatasetProject == datasetID.GetProject() && + reservation.DatasetDomain == datasetID.GetDomain() && + reservation.DatasetName == datasetID.GetName() && + reservation.DatasetVersion == datasetID.GetVersion() && reservation.TagName == tagName && reservation.OwnerID == prevOwner && reservation.ExpiresAt == now.Add(heartbeatInterval*heartbeatGracePeriodMultiplier) @@ -175,7 +175,7 @@ func TestGetOrExtendReservation_ExtendReservation(t *testing.T) { resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req) assert.Nil(t, err) - assert.Equal(t, prevOwner, resp.GetReservation().OwnerId) + assert.Equal(t, prevOwner, resp.GetReservation().GetOwnerId()) } func TestGetOrExtendReservation_TakeOverReservation(t *testing.T) { @@ -191,10 +191,10 @@ func TestGetOrExtendReservation_TakeOverReservation(t *testing.T) { dcRepo.MockReservationRepo.On("Update", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(reservation models.Reservation) bool { - return reservation.DatasetProject == datasetID.Project && - reservation.DatasetDomain == datasetID.Domain && - reservation.DatasetName == datasetID.Name && - reservation.DatasetVersion == datasetID.Version && + 
return reservation.DatasetProject == datasetID.GetProject() && + reservation.DatasetDomain == datasetID.GetDomain() && + reservation.DatasetName == datasetID.GetName() && + reservation.DatasetVersion == datasetID.GetVersion() && reservation.TagName == tagName && reservation.OwnerID == currentOwner && reservation.ExpiresAt == now.Add(heartbeatInterval*heartbeatGracePeriodMultiplier) @@ -215,7 +215,7 @@ func TestGetOrExtendReservation_TakeOverReservation(t *testing.T) { resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req) assert.Nil(t, err) - assert.Equal(t, currentOwner, resp.GetReservation().OwnerId) + assert.Equal(t, currentOwner, resp.GetReservation().GetOwnerId()) } func TestGetOrExtendReservation_ReservationExists(t *testing.T) { @@ -241,7 +241,7 @@ func TestGetOrExtendReservation_ReservationExists(t *testing.T) { resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req) assert.Nil(t, err) - assert.Equal(t, prevOwner, resp.GetReservation().OwnerId) + assert.Equal(t, prevOwner, resp.GetReservation().GetOwnerId()) } func TestReleaseReservation(t *testing.T) { @@ -252,10 +252,10 @@ func TestReleaseReservation(t *testing.T) { dcRepo.MockReservationRepo.On("Delete", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(reservationKey models.ReservationKey) bool { - return reservationKey.DatasetProject == datasetID.Project && - reservationKey.DatasetDomain == datasetID.Domain && - reservationKey.DatasetName == datasetID.Name && - reservationKey.DatasetVersion == datasetID.Version && + return reservationKey.DatasetProject == datasetID.GetProject() && + reservationKey.DatasetDomain == datasetID.GetDomain() && + reservationKey.DatasetName == datasetID.GetName() && + reservationKey.DatasetVersion == datasetID.GetVersion() && reservationKey.TagName == tagName }), mock.MatchedBy(func(ownerID string) bool { @@ -286,10 +286,10 @@ func TestReleaseReservation_Failure(t *testing.T) { dcRepo.MockReservationRepo.On("Delete", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(reservationKey models.ReservationKey) bool { - return reservationKey.DatasetProject == datasetID.Project && - reservationKey.DatasetDomain == datasetID.Domain && - reservationKey.DatasetName == datasetID.Name && - reservationKey.DatasetVersion == datasetID.Version && + return reservationKey.DatasetProject == datasetID.GetProject() && + reservationKey.DatasetDomain == datasetID.GetDomain() && + reservationKey.DatasetName == datasetID.GetName() && + reservationKey.DatasetVersion == datasetID.GetVersion() && reservationKey.TagName == tagName }), mock.MatchedBy(func(ownerID string) bool { @@ -324,10 +324,10 @@ func TestReleaseReservation_GracefulFailure(t *testing.T) { dcRepo.MockReservationRepo.On("Delete", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(reservationKey models.ReservationKey) bool { - return reservationKey.DatasetProject == datasetID.Project && - reservationKey.DatasetDomain == datasetID.Domain && - reservationKey.DatasetName == datasetID.Name && - reservationKey.DatasetVersion == datasetID.Version && + return reservationKey.DatasetProject == datasetID.GetProject() && + reservationKey.DatasetDomain == datasetID.GetDomain() && + reservationKey.DatasetName == datasetID.GetName() && + reservationKey.DatasetVersion == datasetID.GetVersion() && reservationKey.TagName == tagName }), mock.MatchedBy(func(ownerID string) bool { @@ -360,10 +360,10 @@ func 
setUpReservationRepoGet(dcRepo *mocks.DataCatalogRepo, prevExpiresAt time.T dcRepo.MockReservationRepo.On("Get", mock.MatchedBy(func(ctx context.Context) bool { return true }), mock.MatchedBy(func(key models.ReservationKey) bool { - return key.DatasetProject == datasetID.Project && - key.DatasetDomain == datasetID.Domain && - key.DatasetVersion == datasetID.Version && - key.DatasetName == datasetID.Name && + return key.DatasetProject == datasetID.GetProject() && + key.DatasetDomain == datasetID.GetDomain() && + key.DatasetVersion == datasetID.GetVersion() && + key.DatasetName == datasetID.GetName() && key.TagName == tagName })).Return( models.Reservation{ diff --git a/datacatalog/pkg/manager/impl/tag_manager.go b/datacatalog/pkg/manager/impl/tag_manager.go index 784af9164c..29280b83b4 100644 --- a/datacatalog/pkg/manager/impl/tag_manager.go +++ b/datacatalog/pkg/manager/impl/tag_manager.go @@ -37,15 +37,15 @@ func (m *tagManager) AddTag(ctx context.Context, request *datacatalog.AddTagRequ timer := m.systemMetrics.createResponseTime.Start(ctx) defer timer.Stop() - if err := validators.ValidateTag(request.Tag); err != nil { + if err := validators.ValidateTag(request.GetTag()); err != nil { logger.Warnf(ctx, "Invalid get tag request %+v err: %v", request, err) m.systemMetrics.validationErrorCounter.Inc(ctx) return nil, err } // verify the artifact and dataset exists before adding a tag to it - datasetID := request.Tag.Dataset - ctx = contextutils.WithProjectDomain(ctx, datasetID.Project, datasetID.Domain) + datasetID := request.GetTag().GetDataset() + ctx = contextutils.WithProjectDomain(ctx, datasetID.GetProject(), datasetID.GetDomain()) datasetKey := transformers.FromDatasetID(datasetID) dataset, err := m.repo.DatasetRepo().Get(ctx, datasetKey) @@ -54,17 +54,17 @@ func (m *tagManager) AddTag(ctx context.Context, request *datacatalog.AddTagRequ return nil, err } - artifactKey := transformers.ToArtifactKey(datasetID, request.Tag.ArtifactId) + artifactKey := transformers.ToArtifactKey(datasetID, request.GetTag().GetArtifactId()) _, err = m.repo.ArtifactRepo().Get(ctx, artifactKey) if err != nil { m.systemMetrics.addTagFailureCounter.Inc(ctx) return nil, err } - tagKey := transformers.ToTagKey(datasetID, request.Tag.Name) + tagKey := transformers.ToTagKey(datasetID, request.GetTag().GetName()) err = m.repo.TagRepo().Create(ctx, models.Tag{ TagKey: tagKey, - ArtifactID: request.Tag.ArtifactId, + ArtifactID: request.GetTag().GetArtifactId(), DatasetUUID: dataset.UUID, }) if err != nil { diff --git a/datacatalog/pkg/manager/impl/tag_manager_test.go b/datacatalog/pkg/manager/impl/tag_manager_test.go index 98e4b41dfd..e77d3abbe1 100644 --- a/datacatalog/pkg/manager/impl/tag_manager_test.go +++ b/datacatalog/pkg/manager/impl/tag_manager_test.go @@ -129,7 +129,7 @@ func TestAddTag(t *testing.T) { _, err := tagManager.AddTag(context.Background(), &datacatalog.AddTagRequest{ Tag: &datacatalog.Tag{ ArtifactId: "noArtifact", - Dataset: getTestDataset().Id, + Dataset: getTestDataset().GetId(), }, }) @@ -143,7 +143,7 @@ func TestAddTag(t *testing.T) { _, err := tagManager.AddTag(context.Background(), &datacatalog.AddTagRequest{ Tag: &datacatalog.Tag{ Name: "noArtifact", - Dataset: getTestDataset().Id, + Dataset: getTestDataset().GetId(), }, }) diff --git a/datacatalog/pkg/manager/impl/validators/artifact_validator.go b/datacatalog/pkg/manager/impl/validators/artifact_validator.go index d4721e1597..caf1ad3d58 100644 --- a/datacatalog/pkg/manager/impl/validators/artifact_validator.go +++ 
b/datacatalog/pkg/manager/impl/validators/artifact_validator.go @@ -18,10 +18,10 @@ func ValidateGetArtifactRequest(request *datacatalog.GetArtifactRequest) error { return NewMissingArgumentError(fmt.Sprintf("one of %s/%s", artifactID, tagName)) } - switch request.QueryHandle.(type) { + switch request.GetQueryHandle().(type) { case *datacatalog.GetArtifactRequest_ArtifactId: - if request.Dataset != nil { - err := ValidateDatasetID(request.Dataset) + if request.GetDataset() != nil { + err := ValidateDatasetID(request.GetDataset()) if err != nil { return err } @@ -31,7 +31,7 @@ func ValidateGetArtifactRequest(request *datacatalog.GetArtifactRequest) error { return err } case *datacatalog.GetArtifactRequest_TagName: - if err := ValidateDatasetID(request.Dataset); err != nil { + if err := ValidateDatasetID(request.GetDataset()); err != nil { return err } @@ -58,15 +58,15 @@ func ValidateArtifact(artifact *datacatalog.Artifact) error { return NewMissingArgumentError(artifactEntity) } - if err := ValidateDatasetID(artifact.Dataset); err != nil { + if err := ValidateDatasetID(artifact.GetDataset()); err != nil { return err } - if err := ValidateEmptyStringField(artifact.Id, artifactID); err != nil { + if err := ValidateEmptyStringField(artifact.GetId(), artifactID); err != nil { return err } - if err := ValidateEmptyArtifactData(artifact.Data); err != nil { + if err := ValidateEmptyArtifactData(artifact.GetData()); err != nil { return err } @@ -75,16 +75,16 @@ func ValidateArtifact(artifact *datacatalog.Artifact) error { // Validate the list request and format the request with proper defaults if not provided func ValidateListArtifactRequest(request *datacatalog.ListArtifactsRequest) error { - if err := ValidateDatasetID(request.Dataset); err != nil { + if err := ValidateDatasetID(request.GetDataset()); err != nil { return err } - if err := ValidateArtifactFilterTypes(request.Filter.GetFilters()); err != nil { + if err := ValidateArtifactFilterTypes(request.GetFilter().GetFilters()); err != nil { return err } - if request.Pagination != nil { - err := ValidatePagination(request.Pagination) + if request.GetPagination() != nil { + err := ValidatePagination(request.GetPagination()) if err != nil { return err } @@ -108,10 +108,10 @@ func ValidateUpdateArtifactRequest(request *datacatalog.UpdateArtifactRequest) e return NewMissingArgumentError(fmt.Sprintf("one of %s/%s", artifactID, tagName)) } - switch request.QueryHandle.(type) { + switch request.GetQueryHandle().(type) { case *datacatalog.UpdateArtifactRequest_ArtifactId: - if request.Dataset != nil { - err := ValidateDatasetID(request.Dataset) + if request.GetDataset() != nil { + err := ValidateDatasetID(request.GetDataset()) if err != nil { return err } @@ -121,7 +121,7 @@ func ValidateUpdateArtifactRequest(request *datacatalog.UpdateArtifactRequest) e return err } case *datacatalog.UpdateArtifactRequest_TagName: - if err := ValidateDatasetID(request.Dataset); err != nil { + if err := ValidateDatasetID(request.GetDataset()); err != nil { return err } @@ -132,7 +132,7 @@ func ValidateUpdateArtifactRequest(request *datacatalog.UpdateArtifactRequest) e return NewInvalidArgumentError("QueryHandle", "invalid type") } - if err := ValidateEmptyArtifactData(request.Data); err != nil { + if err := ValidateEmptyArtifactData(request.GetData()); err != nil { return err } diff --git a/datacatalog/pkg/manager/impl/validators/dataset_validator.go b/datacatalog/pkg/manager/impl/validators/dataset_validator.go index 5ab010517f..3cd60c57f9 100644 --- 
a/datacatalog/pkg/manager/impl/validators/dataset_validator.go +++ b/datacatalog/pkg/manager/impl/validators/dataset_validator.go @@ -18,16 +18,16 @@ func ValidateDatasetID(ds *datacatalog.DatasetID) error { if ds == nil { return NewMissingArgumentError(datasetEntity) } - if err := ValidateEmptyStringField(ds.Project, datasetProject); err != nil { + if err := ValidateEmptyStringField(ds.GetProject(), datasetProject); err != nil { return err } - if err := ValidateEmptyStringField(ds.Domain, datasetDomain); err != nil { + if err := ValidateEmptyStringField(ds.GetDomain(), datasetDomain); err != nil { return err } - if err := ValidateEmptyStringField(ds.Name, datasetName); err != nil { + if err := ValidateEmptyStringField(ds.GetName(), datasetName); err != nil { return err } - if err := ValidateEmptyStringField(ds.Version, datasetVersion); err != nil { + if err := ValidateEmptyStringField(ds.GetVersion(), datasetVersion); err != nil { return err } return nil @@ -35,15 +35,15 @@ func ValidateDatasetID(ds *datacatalog.DatasetID) error { // Ensure list Datasets request is properly constructed func ValidateListDatasetsRequest(request *datacatalog.ListDatasetsRequest) error { - if request.Pagination != nil { - err := ValidatePagination(request.Pagination) + if request.GetPagination() != nil { + err := ValidatePagination(request.GetPagination()) if err != nil { return err } } // Datasets cannot be filtered by tag, partitions or artifacts - for _, filter := range request.Filter.GetFilters() { + for _, filter := range request.GetFilter().GetFilters() { if filter.GetTagFilter() != nil { return NewInvalidFilterError(common.Dataset, common.Tag) } else if filter.GetPartitionFilter() != nil { diff --git a/datacatalog/pkg/manager/impl/validators/errors.go b/datacatalog/pkg/manager/impl/validators/errors.go index dae123ebfd..eec033b8f9 100644 --- a/datacatalog/pkg/manager/impl/validators/errors.go +++ b/datacatalog/pkg/manager/impl/validators/errors.go @@ -14,13 +14,13 @@ const invalidArgFormat = "invalid value for %s, value:[%s]" const invalidFilterFormat = "%s cannot be filtered by %s properties" func NewMissingArgumentError(field string) error { - return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(missingFieldFormat, field)) + return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(missingFieldFormat, field)) //nolint } func NewInvalidArgumentError(field string, value string) error { - return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(invalidArgFormat, field, value)) + return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(invalidArgFormat, field, value)) //nolint } func NewInvalidFilterError(entity common.Entity, propertyEntity common.Entity) error { - return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(invalidFilterFormat, entity, propertyEntity)) + return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(invalidFilterFormat, entity, propertyEntity)) //nolint } diff --git a/datacatalog/pkg/manager/impl/validators/pagination_validator.go b/datacatalog/pkg/manager/impl/validators/pagination_validator.go index 7f37dbe7d5..19072bec74 100644 --- a/datacatalog/pkg/manager/impl/validators/pagination_validator.go +++ b/datacatalog/pkg/manager/impl/validators/pagination_validator.go @@ -27,18 +27,18 @@ func ValidateToken(token string) error { // Validate the pagination options and set default limits func ValidatePagination(options *datacatalog.PaginationOptions) error { - err := ValidateToken(options.Token) + err 
:= ValidateToken(options.GetToken()) if err != nil { return err } - if options.SortKey != datacatalog.PaginationOptions_CREATION_TIME { - return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid sort key %v", options.SortKey) + if options.GetSortKey() != datacatalog.PaginationOptions_CREATION_TIME { + return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid sort key %v", options.GetSortKey()) } - if options.SortOrder != datacatalog.PaginationOptions_ASCENDING && - options.SortOrder != datacatalog.PaginationOptions_DESCENDING { - return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid sort order %v", options.SortOrder) + if options.GetSortOrder() != datacatalog.PaginationOptions_ASCENDING && + options.GetSortOrder() != datacatalog.PaginationOptions_DESCENDING { + return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid sort order %v", options.GetSortOrder()) } return nil diff --git a/datacatalog/pkg/manager/impl/validators/partition_validator.go b/datacatalog/pkg/manager/impl/validators/partition_validator.go index 2b94e0e366..567cf300c6 100644 --- a/datacatalog/pkg/manager/impl/validators/partition_validator.go +++ b/datacatalog/pkg/manager/impl/validators/partition_validator.go @@ -39,15 +39,15 @@ func ValidatePartitions(datasetPartitionKeys []string, artifactPartitions []*dat continue } - if err := ValidateEmptyStringField(partitionKeyName, artifactPartition.Key); err != nil { + if err := ValidateEmptyStringField(partitionKeyName, artifactPartition.GetKey()); err != nil { partitionErrors = append(partitionErrors, NewMissingArgumentError(fmt.Sprintf("%v[%v]", partitionKeyName, idx))) - } else if err := ValidateEmptyStringField(partitionValueName, artifactPartition.Value); err != nil { + } else if err := ValidateEmptyStringField(partitionValueName, artifactPartition.GetValue()); err != nil { partitionErrors = append(partitionErrors, NewMissingArgumentError(fmt.Sprintf("%v[%v]", partitionValueName, idx))) } else { - _, ok := partitionKeyMatches[artifactPartition.Key] + _, ok := partitionKeyMatches[artifactPartition.GetKey()] if ok { - partitionKeyMatches[artifactPartition.Key] = true + partitionKeyMatches[artifactPartition.GetKey()] = true } else { keyMismatch = true } diff --git a/datacatalog/pkg/manager/impl/validators/tag_validator.go b/datacatalog/pkg/manager/impl/validators/tag_validator.go index 4539ad03c2..7fda9c873d 100644 --- a/datacatalog/pkg/manager/impl/validators/tag_validator.go +++ b/datacatalog/pkg/manager/impl/validators/tag_validator.go @@ -13,15 +13,15 @@ func ValidateTag(tag *datacatalog.Tag) error { if tag == nil { return NewMissingArgumentError(tagEntity) } - if err := ValidateDatasetID(tag.Dataset); err != nil { + if err := ValidateDatasetID(tag.GetDataset()); err != nil { return err } - if err := ValidateEmptyStringField(tag.Name, tagName); err != nil { + if err := ValidateEmptyStringField(tag.GetName(), tagName); err != nil { return err } - if err := ValidateEmptyStringField(tag.ArtifactId, artifactID); err != nil { + if err := ValidateEmptyStringField(tag.GetArtifactId(), artifactID); err != nil { return err } return nil diff --git a/datacatalog/pkg/repositories/errors/postgres.go b/datacatalog/pkg/repositories/errors/postgres.go index 2ab8a2895c..31e1c253d6 100644 --- a/datacatalog/pkg/repositories/errors/postgres.go +++ b/datacatalog/pkg/repositories/errors/postgres.go @@ -62,7 +62,7 @@ func (p *postgresErrorTransformer) ToDataCatalogError(err error) error { case undefinedTable: return 
catalogErrors.NewDataCatalogErrorf(codes.InvalidArgument, unsupportedTableOperation, pqError.Message) default: - return catalogErrors.NewDataCatalogErrorf(codes.Unknown, fmt.Sprintf(defaultPgError, pqError.Code, pqError.Message)) + return catalogErrors.NewDataCatalogErrorf(codes.Unknown, fmt.Sprintf(defaultPgError, pqError.Code, pqError.Message)) //nolint } } diff --git a/datacatalog/pkg/repositories/transformers/artifact.go b/datacatalog/pkg/repositories/transformers/artifact.go index 57890ef4b1..c962fd5ce1 100644 --- a/datacatalog/pkg/repositories/transformers/artifact.go +++ b/datacatalog/pkg/repositories/transformers/artifact.go @@ -18,29 +18,29 @@ func SerializedMetadata(metadata *datacatalog.Metadata) ([]byte, error) { } func CreateArtifactModel(request *datacatalog.CreateArtifactRequest, artifactData []models.ArtifactData, dataset models.Dataset) (models.Artifact, error) { - datasetID := request.Artifact.Dataset + datasetID := request.GetArtifact().GetDataset() - serializedMetadata, err := marshalMetadata(request.Artifact.Metadata) + serializedMetadata, err := marshalMetadata(request.GetArtifact().GetMetadata()) if err != nil { return models.Artifact{}, err } - partitions := make([]models.Partition, len(request.Artifact.Partitions)) - for i, partition := range request.Artifact.GetPartitions() { + partitions := make([]models.Partition, len(request.GetArtifact().GetPartitions())) + for i, partition := range request.GetArtifact().GetPartitions() { partitions[i] = models.Partition{ DatasetUUID: dataset.UUID, - Key: partition.Key, - Value: partition.Value, + Key: partition.GetKey(), + Value: partition.GetValue(), } } return models.Artifact{ ArtifactKey: models.ArtifactKey{ - DatasetProject: datasetID.Project, - DatasetDomain: datasetID.Domain, - DatasetName: datasetID.Name, - DatasetVersion: datasetID.Version, - ArtifactID: request.Artifact.Id, + DatasetProject: datasetID.GetProject(), + DatasetDomain: datasetID.GetDomain(), + DatasetName: datasetID.GetName(), + DatasetVersion: datasetID.GetVersion(), + ArtifactID: request.GetArtifact().GetId(), }, DatasetUUID: dataset.UUID, ArtifactData: artifactData, @@ -112,10 +112,10 @@ func ToArtifactKey(datasetID *datacatalog.DatasetID, artifactID string) models.A ArtifactID: artifactID, } if datasetID != nil { - artifactKey.DatasetProject = datasetID.Project - artifactKey.DatasetDomain = datasetID.Domain - artifactKey.DatasetName = datasetID.Name - artifactKey.DatasetVersion = datasetID.Version + artifactKey.DatasetProject = datasetID.GetProject() + artifactKey.DatasetDomain = datasetID.GetDomain() + artifactKey.DatasetName = datasetID.GetName() + artifactKey.DatasetVersion = datasetID.GetVersion() } return artifactKey } diff --git a/datacatalog/pkg/repositories/transformers/artifact_test.go b/datacatalog/pkg/repositories/transformers/artifact_test.go index 350a2396aa..5c556fcabb 100644 --- a/datacatalog/pkg/repositories/transformers/artifact_test.go +++ b/datacatalog/pkg/repositories/transformers/artifact_test.go @@ -50,11 +50,11 @@ func getTestTags() []models.Tag { func getDatasetModel() models.Dataset { return models.Dataset{ DatasetKey: models.DatasetKey{ - Project: datasetID.Project, - Domain: datasetID.Domain, - Name: datasetID.Name, - Version: datasetID.Version, - UUID: datasetID.UUID, + Project: datasetID.GetProject(), + Domain: datasetID.GetDomain(), + Name: datasetID.GetName(), + Version: datasetID.GetVersion(), + UUID: datasetID.GetUUID(), }, } } @@ -81,11 +81,11 @@ func TestCreateArtifactModel(t *testing.T) { artifactModel, err := 
CreateArtifactModel(createArtifactRequest, testArtifactData, getDatasetModel()) assert.NoError(t, err) - assert.Equal(t, artifactModel.ArtifactID, createArtifactRequest.Artifact.Id) - assert.Equal(t, artifactModel.ArtifactKey.DatasetProject, datasetID.Project) - assert.Equal(t, artifactModel.ArtifactKey.DatasetDomain, datasetID.Domain) - assert.Equal(t, artifactModel.ArtifactKey.DatasetName, datasetID.Name) - assert.Equal(t, artifactModel.ArtifactKey.DatasetVersion, datasetID.Version) + assert.Equal(t, artifactModel.ArtifactID, createArtifactRequest.GetArtifact().GetId()) + assert.Equal(t, artifactModel.ArtifactKey.DatasetProject, datasetID.GetProject()) + assert.Equal(t, artifactModel.ArtifactKey.DatasetDomain, datasetID.GetDomain()) + assert.Equal(t, artifactModel.ArtifactKey.DatasetName, datasetID.GetName()) + assert.Equal(t, artifactModel.ArtifactKey.DatasetVersion, datasetID.GetVersion()) assert.EqualValues(t, testArtifactData, artifactModel.ArtifactData) assert.EqualValues(t, getTestPartitions(), artifactModel.Partitions) } @@ -130,32 +130,32 @@ func TestFromArtifactModel(t *testing.T) { actual, err := FromArtifactModel(artifactModel) assert.NoError(t, err) - assert.Equal(t, artifactModel.ArtifactID, actual.Id) - assert.Equal(t, artifactModel.DatasetProject, actual.Dataset.Project) - assert.Equal(t, artifactModel.DatasetDomain, actual.Dataset.Domain) - assert.Equal(t, artifactModel.DatasetName, actual.Dataset.Name) - assert.Equal(t, artifactModel.DatasetVersion, actual.Dataset.Version) + assert.Equal(t, artifactModel.ArtifactID, actual.GetId()) + assert.Equal(t, artifactModel.DatasetProject, actual.GetDataset().GetProject()) + assert.Equal(t, artifactModel.DatasetDomain, actual.GetDataset().GetDomain()) + assert.Equal(t, artifactModel.DatasetName, actual.GetDataset().GetName()) + assert.Equal(t, artifactModel.DatasetVersion, actual.GetDataset().GetVersion()) - assert.Len(t, actual.Partitions, 2) - assert.EqualValues(t, artifactModel.Partitions[0].Key, actual.Partitions[0].Key) - assert.EqualValues(t, artifactModel.Partitions[0].Value, actual.Partitions[0].Value) - assert.EqualValues(t, artifactModel.Partitions[1].Value, actual.Partitions[1].Value) - assert.EqualValues(t, artifactModel.Partitions[1].Value, actual.Partitions[1].Value) + assert.Len(t, actual.GetPartitions(), 2) + assert.EqualValues(t, artifactModel.Partitions[0].Key, actual.GetPartitions()[0].GetKey()) + assert.EqualValues(t, artifactModel.Partitions[0].Value, actual.GetPartitions()[0].GetValue()) + assert.EqualValues(t, artifactModel.Partitions[1].Key, actual.GetPartitions()[1].GetKey()) + assert.EqualValues(t, artifactModel.Partitions[1].Value, actual.GetPartitions()[1].GetValue()) - assert.Len(t, actual.Tags, 1) - assert.EqualValues(t, artifactModel.Tags[0].TagName, actual.Tags[0].Name) + assert.Len(t, actual.GetTags(), 1) + assert.EqualValues(t, artifactModel.Tags[0].TagName, actual.GetTags()[0].GetName()) timestampProto, err := ptypes.TimestampProto(createdAt) assert.NoError(t, err) - assert.Equal(t, actual.CreatedAt, timestampProto) + assert.Equal(t, actual.GetCreatedAt(), timestampProto) } func TestToArtifactKey(t *testing.T) { artifactKey := ToArtifactKey(datasetID, "artifactID-1") - assert.Equal(t, datasetID.Project, artifactKey.DatasetProject) - assert.Equal(t, datasetID.Domain, artifactKey.DatasetDomain) - assert.Equal(t, datasetID.Name, artifactKey.DatasetName) - assert.Equal(t, datasetID.Version, artifactKey.DatasetVersion) + assert.Equal(t, datasetID.GetProject(), artifactKey.DatasetProject) +
assert.Equal(t, datasetID.GetDomain(), artifactKey.DatasetDomain) + assert.Equal(t, datasetID.GetName(), artifactKey.DatasetName) + assert.Equal(t, datasetID.GetVersion(), artifactKey.DatasetVersion) assert.Equal(t, artifactKey.ArtifactID, "artifactID-1") } diff --git a/datacatalog/pkg/repositories/transformers/dataset.go b/datacatalog/pkg/repositories/transformers/dataset.go index 9d5cb168a4..bc8f86e4be 100644 --- a/datacatalog/pkg/repositories/transformers/dataset.go +++ b/datacatalog/pkg/repositories/transformers/dataset.go @@ -7,12 +7,12 @@ import ( // Create a dataset model from the Dataset api object. This will serialize the metadata in the dataset as part of the transform func CreateDatasetModel(dataset *datacatalog.Dataset) (*models.Dataset, error) { - serializedMetadata, err := marshalMetadata(dataset.Metadata) + serializedMetadata, err := marshalMetadata(dataset.GetMetadata()) if err != nil { return nil, err } - partitionKeys := make([]models.PartitionKey, len(dataset.PartitionKeys)) + partitionKeys := make([]models.PartitionKey, len(dataset.GetPartitionKeys())) for i, partitionKey := range dataset.GetPartitionKeys() { partitionKeys[i] = models.PartitionKey{ @@ -22,11 +22,11 @@ func CreateDatasetModel(dataset *datacatalog.Dataset) (*models.Dataset, error) { return &models.Dataset{ DatasetKey: models.DatasetKey{ - Project: dataset.Id.Project, - Domain: dataset.Id.Domain, - Name: dataset.Id.Name, - Version: dataset.Id.Version, - UUID: dataset.Id.UUID, + Project: dataset.GetId().GetProject(), + Domain: dataset.GetId().GetDomain(), + Name: dataset.GetId().GetName(), + Version: dataset.GetId().GetVersion(), + UUID: dataset.GetId().GetUUID(), }, SerializedMetadata: serializedMetadata, PartitionKeys: partitionKeys, @@ -36,11 +36,11 @@ func CreateDatasetModel(dataset *datacatalog.Dataset) (*models.Dataset, error) { // Create a dataset ID from the dataset key model func FromDatasetID(datasetID *datacatalog.DatasetID) models.DatasetKey { return models.DatasetKey{ - Project: datasetID.Project, - Domain: datasetID.Domain, - Name: datasetID.Name, - Version: datasetID.Version, - UUID: datasetID.UUID, + Project: datasetID.GetProject(), + Domain: datasetID.GetDomain(), + Name: datasetID.GetName(), + Version: datasetID.GetVersion(), + UUID: datasetID.GetUUID(), } } diff --git a/datacatalog/pkg/repositories/transformers/dataset_test.go b/datacatalog/pkg/repositories/transformers/dataset_test.go index 25062cf264..39e0e7ec3d 100644 --- a/datacatalog/pkg/repositories/transformers/dataset_test.go +++ b/datacatalog/pkg/repositories/transformers/dataset_test.go @@ -25,11 +25,11 @@ var datasetID = &datacatalog.DatasetID{ } func assertDatasetIDEqualsModel(t *testing.T, idlDataset *datacatalog.DatasetID, model *models.DatasetKey) { - assert.Equal(t, idlDataset.Project, model.Project) - assert.Equal(t, idlDataset.Domain, model.Domain) - assert.Equal(t, idlDataset.Name, model.Name) - assert.Equal(t, idlDataset.Version, model.Version) - assert.Equal(t, idlDataset.UUID, model.UUID) + assert.Equal(t, idlDataset.GetProject(), model.Project) + assert.Equal(t, idlDataset.GetDomain(), model.Domain) + assert.Equal(t, idlDataset.GetName(), model.Name) + assert.Equal(t, idlDataset.GetVersion(), model.Version) + assert.Equal(t, idlDataset.GetUUID(), model.UUID) } func TestCreateDatasetModelNoParitions(t *testing.T) { @@ -40,11 +40,11 @@ func TestCreateDatasetModelNoParitions(t *testing.T) { datasetModel, err := CreateDatasetModel(dataset) assert.NoError(t, err) - assertDatasetIDEqualsModel(t, dataset.Id, 
&datasetModel.DatasetKey) + assertDatasetIDEqualsModel(t, dataset.GetId(), &datasetModel.DatasetKey) unmarshaledMetadata, err := unmarshalMetadata(datasetModel.SerializedMetadata) assert.NoError(t, err) - assert.EqualValues(t, unmarshaledMetadata.KeyMap, metadata.KeyMap) + assert.EqualValues(t, unmarshaledMetadata.GetKeyMap(), metadata.GetKeyMap()) assert.Len(t, datasetModel.PartitionKeys, 0) } @@ -58,15 +58,15 @@ func TestCreateDatasetModel(t *testing.T) { datasetModel, err := CreateDatasetModel(dataset) assert.NoError(t, err) - assertDatasetIDEqualsModel(t, dataset.Id, &datasetModel.DatasetKey) + assertDatasetIDEqualsModel(t, dataset.GetId(), &datasetModel.DatasetKey) unmarshaledMetadata, err := unmarshalMetadata(datasetModel.SerializedMetadata) assert.NoError(t, err) - assert.EqualValues(t, unmarshaledMetadata.KeyMap, metadata.KeyMap) + assert.EqualValues(t, unmarshaledMetadata.GetKeyMap(), metadata.GetKeyMap()) assert.Len(t, datasetModel.PartitionKeys, 2) - assert.Equal(t, datasetModel.PartitionKeys[0], models.PartitionKey{Name: dataset.PartitionKeys[0]}) - assert.Equal(t, datasetModel.PartitionKeys[1], models.PartitionKey{Name: dataset.PartitionKeys[1]}) + assert.Equal(t, datasetModel.PartitionKeys[0], models.PartitionKey{Name: dataset.GetPartitionKeys()[0]}) + assert.Equal(t, datasetModel.PartitionKeys[1], models.PartitionKey{Name: dataset.GetPartitionKeys()[1]}) } func TestFromDatasetID(t *testing.T) { @@ -86,9 +86,9 @@ func TestFromDatasetModelNoPartitionsOrMetadata(t *testing.T) { } dataset, err := FromDatasetModel(*datasetModel) assert.NoError(t, err) - assertDatasetIDEqualsModel(t, dataset.Id, &datasetModel.DatasetKey) - assert.Len(t, dataset.Metadata.KeyMap, 0) - assert.Len(t, dataset.PartitionKeys, 0) + assertDatasetIDEqualsModel(t, dataset.GetId(), &datasetModel.DatasetKey) + assert.Len(t, dataset.GetMetadata().GetKeyMap(), 0) + assert.Len(t, dataset.GetPartitionKeys(), 0) } func TestFromDatasetModelWithPartitions(t *testing.T) { @@ -108,8 +108,8 @@ func TestFromDatasetModelWithPartitions(t *testing.T) { } dataset, err := FromDatasetModel(*datasetModel) assert.NoError(t, err) - assertDatasetIDEqualsModel(t, dataset.Id, &datasetModel.DatasetKey) - assert.Len(t, dataset.Metadata.KeyMap, 2) - assert.EqualValues(t, dataset.Metadata.KeyMap, metadata.KeyMap) - assert.Len(t, dataset.PartitionKeys, 2) + assertDatasetIDEqualsModel(t, dataset.GetId(), &datasetModel.DatasetKey) + assert.Len(t, dataset.GetMetadata().GetKeyMap(), 2) + assert.EqualValues(t, dataset.GetMetadata().GetKeyMap(), metadata.GetKeyMap()) + assert.Len(t, dataset.GetPartitionKeys(), 2) } diff --git a/datacatalog/pkg/repositories/transformers/filters.go b/datacatalog/pkg/repositories/transformers/filters.go index c4ed8b6f08..0c6f083ee4 100644 --- a/datacatalog/pkg/repositories/transformers/filters.go +++ b/datacatalog/pkg/repositories/transformers/filters.go @@ -44,7 +44,7 @@ func FilterToListInput(ctx context.Context, sourceEntity common.Entity, filterEx } func constructModelFilter(ctx context.Context, singleFilter *datacatalog.SinglePropertyFilter, sourceEntity common.Entity) (models.ModelFilter, error) { - operator := comparisonOperatorMap[singleFilter.Operator] + operator := comparisonOperatorMap[singleFilter.GetOperator()] var modelFilter models.ModelFilter switch propertyFilter := singleFilter.GetPropertyFilter().(type) { @@ -53,8 +53,8 @@ func constructModelFilter(ctx context.Context, singleFilter *datacatalog.SingleP switch partitionProperty := partitionPropertyFilter.GetProperty().(type) { case 
*datacatalog.PartitionPropertyFilter_KeyVal: - key := partitionProperty.KeyVal.Key - value := partitionProperty.KeyVal.Value + key := partitionProperty.KeyVal.GetKey() + value := partitionProperty.KeyVal.GetValue() logger.Debugf(ctx, "Constructing partition key:[%v], val:[%v] filter", key, value) if err := validators.ValidateEmptyStringField(key, "PartitionKey"); err != nil { diff --git a/datacatalog/pkg/repositories/transformers/pagination.go b/datacatalog/pkg/repositories/transformers/pagination.go index 793779ab46..ed7a7925c1 100644 --- a/datacatalog/pkg/repositories/transformers/pagination.go +++ b/datacatalog/pkg/repositories/transformers/pagination.go @@ -23,18 +23,18 @@ func ApplyPagination(paginationOpts *datacatalog.PaginationOptions, input *model if paginationOpts != nil { // if the token is empty, that is still valid input since it is optional - if len(strings.Trim(paginationOpts.Token, " ")) == 0 { + if len(strings.Trim(paginationOpts.GetToken(), " ")) == 0 { offset = common.DefaultPageOffset } else { - parsedOffset, err := strconv.ParseInt(paginationOpts.Token, 10, 32) + parsedOffset, err := strconv.ParseInt(paginationOpts.GetToken(), 10, 32) if err != nil { return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid token %v", offset) } offset = int(parsedOffset) } - limit = int(paginationOpts.Limit) - sortKey = paginationOpts.SortKey - sortOrder = paginationOpts.SortOrder + limit = int(paginationOpts.GetLimit()) + sortKey = paginationOpts.GetSortKey() + sortOrder = paginationOpts.GetSortOrder() } input.Offset = offset diff --git a/datacatalog/pkg/repositories/transformers/reservation.go b/datacatalog/pkg/repositories/transformers/reservation.go index 2ae215be82..11edeb4f26 100644 --- a/datacatalog/pkg/repositories/transformers/reservation.go +++ b/datacatalog/pkg/repositories/transformers/reservation.go @@ -12,14 +12,14 @@ import ( ) func FromReservationID(reservationID *datacatalog.ReservationID) models.ReservationKey { - datasetID := reservationID.DatasetId + datasetID := reservationID.GetDatasetId() return models.ReservationKey{ - DatasetProject: datasetID.Project, - DatasetDomain: datasetID.Domain, - DatasetName: datasetID.Name, - DatasetVersion: datasetID.Version, - TagName: reservationID.TagName, + DatasetProject: datasetID.GetProject(), + DatasetDomain: datasetID.GetDomain(), + DatasetName: datasetID.GetName(), + DatasetVersion: datasetID.GetVersion(), + TagName: reservationID.GetTagName(), } } diff --git a/datacatalog/pkg/repositories/transformers/reservation_test.go b/datacatalog/pkg/repositories/transformers/reservation_test.go index 95ca7795ce..21b8e896fc 100644 --- a/datacatalog/pkg/repositories/transformers/reservation_test.go +++ b/datacatalog/pkg/repositories/transformers/reservation_test.go @@ -22,11 +22,11 @@ func TestFromReservationID(t *testing.T) { } reservationKey := FromReservationID(&reservationID) - assert.Equal(t, reservationKey.DatasetProject, reservationID.DatasetId.Project) - assert.Equal(t, reservationKey.DatasetName, reservationID.DatasetId.Name) - assert.Equal(t, reservationKey.DatasetDomain, reservationID.DatasetId.Domain) - assert.Equal(t, reservationKey.DatasetVersion, reservationID.DatasetId.Version) - assert.Equal(t, reservationKey.TagName, reservationID.TagName) + assert.Equal(t, reservationKey.DatasetProject, reservationID.GetDatasetId().GetProject()) + assert.Equal(t, reservationKey.DatasetName, reservationID.GetDatasetId().GetName()) + assert.Equal(t, reservationKey.DatasetDomain, reservationID.GetDatasetId().GetDomain()) + 
assert.Equal(t, reservationKey.DatasetVersion, reservationID.GetDatasetId().GetVersion()) + assert.Equal(t, reservationKey.TagName, reservationID.GetTagName()) } func TestCreateReservation(t *testing.T) { @@ -47,16 +47,16 @@ func TestCreateReservation(t *testing.T) { reservation, err := CreateReservation(&modelReservation, heartbeatInterval) assert.Equal(t, err, nil) - assert.Equal(t, reservation.ExpiresAt.AsTime(), modelReservation.ExpiresAt.UTC()) - assert.Equal(t, reservation.HeartbeatInterval.AsDuration(), heartbeatInterval) - assert.Equal(t, reservation.OwnerId, modelReservation.OwnerID) - - reservationID := reservation.ReservationId - assert.Equal(t, reservationID.TagName, modelReservation.TagName) - - datasetID := reservationID.DatasetId - assert.Equal(t, datasetID.Project, modelReservation.DatasetProject) - assert.Equal(t, datasetID.Name, modelReservation.DatasetName) - assert.Equal(t, datasetID.Domain, modelReservation.DatasetDomain) - assert.Equal(t, datasetID.Version, modelReservation.DatasetVersion) + assert.Equal(t, reservation.GetExpiresAt().AsTime(), modelReservation.ExpiresAt.UTC()) + assert.Equal(t, reservation.GetHeartbeatInterval().AsDuration(), heartbeatInterval) + assert.Equal(t, reservation.GetOwnerId(), modelReservation.OwnerID) + + reservationID := reservation.GetReservationId() + assert.Equal(t, reservationID.GetTagName(), modelReservation.TagName) + + datasetID := reservationID.GetDatasetId() + assert.Equal(t, datasetID.GetProject(), modelReservation.DatasetProject) + assert.Equal(t, datasetID.GetName(), modelReservation.DatasetName) + assert.Equal(t, datasetID.GetDomain(), modelReservation.DatasetDomain) + assert.Equal(t, datasetID.GetVersion(), modelReservation.DatasetVersion) } diff --git a/datacatalog/pkg/repositories/transformers/tag.go b/datacatalog/pkg/repositories/transformers/tag.go index df98e22200..7fe1f83220 100644 --- a/datacatalog/pkg/repositories/transformers/tag.go +++ b/datacatalog/pkg/repositories/transformers/tag.go @@ -7,10 +7,10 @@ import ( func ToTagKey(datasetID *datacatalog.DatasetID, tagName string) models.TagKey { return models.TagKey{ - DatasetProject: datasetID.Project, - DatasetDomain: datasetID.Domain, - DatasetName: datasetID.Name, - DatasetVersion: datasetID.Version, + DatasetProject: datasetID.GetProject(), + DatasetDomain: datasetID.GetDomain(), + DatasetName: datasetID.GetName(), + DatasetVersion: datasetID.GetVersion(), TagName: tagName, } } diff --git a/datacatalog/pkg/repositories/transformers/tag_test.go b/datacatalog/pkg/repositories/transformers/tag_test.go index c2820f6260..f77af243d6 100644 --- a/datacatalog/pkg/repositories/transformers/tag_test.go +++ b/datacatalog/pkg/repositories/transformers/tag_test.go @@ -22,10 +22,10 @@ func TestToTagKey(t *testing.T) { tagKey := ToTagKey(datasetID, tagName) assert.Equal(t, tagName, tagKey.TagName) - assert.Equal(t, datasetID.Project, tagKey.DatasetProject) - assert.Equal(t, datasetID.Domain, tagKey.DatasetDomain) - assert.Equal(t, datasetID.Name, tagKey.DatasetName) - assert.Equal(t, datasetID.Version, tagKey.DatasetVersion) + assert.Equal(t, datasetID.GetProject(), tagKey.DatasetProject) + assert.Equal(t, datasetID.GetDomain(), tagKey.DatasetDomain) + assert.Equal(t, datasetID.GetName(), tagKey.DatasetName) + assert.Equal(t, datasetID.GetVersion(), tagKey.DatasetVersion) } func TestFromTagModel(t *testing.T) { @@ -46,10 +46,10 @@ func TestFromTagModel(t *testing.T) { tag := FromTagModel(datasetID, tagModel) - assert.Equal(t, tag.Name, tagModel.TagName) - assert.Equal(t, 
datasetID.Project, tag.Dataset.Project) - assert.Equal(t, datasetID.Domain, tag.Dataset.Domain) - assert.Equal(t, datasetID.Name, tag.Dataset.Name) - assert.Equal(t, datasetID.Version, tag.Dataset.Version) - assert.Equal(t, datasetID.UUID, tag.Dataset.UUID) + assert.Equal(t, tag.GetName(), tagModel.TagName) + assert.Equal(t, datasetID.GetProject(), tag.GetDataset().GetProject()) + assert.Equal(t, datasetID.GetDomain(), tag.GetDataset().GetDomain()) + assert.Equal(t, datasetID.GetName(), tag.GetDataset().GetName()) + assert.Equal(t, datasetID.GetVersion(), tag.GetDataset().GetVersion()) + assert.Equal(t, datasetID.GetUUID(), tag.GetDataset().GetUUID()) } diff --git a/datacatalog/pkg/repositories/transformers/util_test.go b/datacatalog/pkg/repositories/transformers/util_test.go index bdbd1c642c..1d0c666e82 100644 --- a/datacatalog/pkg/repositories/transformers/util_test.go +++ b/datacatalog/pkg/repositories/transformers/util_test.go @@ -12,7 +12,7 @@ func TestMarshaling(t *testing.T) { unmarshaledMetadata, err := unmarshalMetadata(marshaledMetadata) assert.NoError(t, err) - assert.EqualValues(t, unmarshaledMetadata.KeyMap, metadata.KeyMap) + assert.EqualValues(t, unmarshaledMetadata.GetKeyMap(), metadata.GetKeyMap()) } func TestMarshalingWithNil(t *testing.T) { @@ -21,5 +21,5 @@ func TestMarshalingWithNil(t *testing.T) { var expectedKeymap map[string]string unmarshaledMetadata, err := unmarshalMetadata(marshaledMetadata) assert.NoError(t, err) - assert.EqualValues(t, expectedKeymap, unmarshaledMetadata.KeyMap) + assert.EqualValues(t, expectedKeymap, unmarshaledMetadata.GetKeyMap()) } diff --git a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml index d7cb3500d6..71a3328681 100644 --- a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml +++ b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml @@ -751,22 +751,18 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8088 - name: grpc port: 81 protocol: TCP # intentionally set to TCP instead of grpc - appProtocol: TCP targetPort: 8089 - name: redoc protocol: TCP - appProtocol: TCP port: 87 targetPort: 8087 - name: http-metrics protocol: TCP - appProtocol: TCP port: 10254 selector: app.kubernetes.io/name: flyteadmin @@ -789,7 +785,6 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8080 selector: app.kubernetes.io/name: flyteconsole diff --git a/deployment/eks/flyte_helm_controlplane_generated.yaml b/deployment/eks/flyte_helm_controlplane_generated.yaml index 60ca7d1720..ad96bd5c73 100644 --- a/deployment/eks/flyte_helm_controlplane_generated.yaml +++ b/deployment/eks/flyte_helm_controlplane_generated.yaml @@ -474,22 +474,18 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8088 - name: grpc port: 81 protocol: TCP # intentionally set to TCP instead of grpc - appProtocol: TCP targetPort: 8089 - name: redoc protocol: TCP - appProtocol: TCP port: 87 targetPort: 8087 - name: http-metrics protocol: TCP - appProtocol: TCP port: 10254 selector: app.kubernetes.io/name: flyteadmin @@ -512,7 +508,6 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8080 selector: app.kubernetes.io/name: flyteconsole diff --git a/deployment/eks/flyte_helm_generated.yaml b/deployment/eks/flyte_helm_generated.yaml index 5e0ae72ec2..b568f084b3 100644 --- a/deployment/eks/flyte_helm_generated.yaml +++ b/deployment/eks/flyte_helm_generated.yaml @@ -782,22 +782,18 @@ spec: - name: http port: 80 protocol: TCP - 
appProtocol: TCP targetPort: 8088 - name: grpc port: 81 protocol: TCP # intentionally set to TCP instead of grpc - appProtocol: TCP targetPort: 8089 - name: redoc protocol: TCP - appProtocol: TCP port: 87 targetPort: 8087 - name: http-metrics protocol: TCP - appProtocol: TCP port: 10254 selector: app.kubernetes.io/name: flyteadmin @@ -820,7 +816,6 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8080 selector: app.kubernetes.io/name: flyteconsole diff --git a/deployment/gcp/flyte_helm_controlplane_generated.yaml b/deployment/gcp/flyte_helm_controlplane_generated.yaml index 29367a5b37..56714541d9 100644 --- a/deployment/gcp/flyte_helm_controlplane_generated.yaml +++ b/deployment/gcp/flyte_helm_controlplane_generated.yaml @@ -488,22 +488,18 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8088 - name: grpc port: 81 protocol: TCP # intentionally set to TCP instead of grpc - appProtocol: TCP targetPort: 8089 - name: redoc protocol: TCP - appProtocol: TCP port: 87 targetPort: 8087 - name: http-metrics protocol: TCP - appProtocol: TCP port: 10254 selector: app.kubernetes.io/name: flyteadmin @@ -526,7 +522,6 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8080 selector: app.kubernetes.io/name: flyteconsole diff --git a/deployment/gcp/flyte_helm_generated.yaml b/deployment/gcp/flyte_helm_generated.yaml index ce1f64c1df..249392fab4 100644 --- a/deployment/gcp/flyte_helm_generated.yaml +++ b/deployment/gcp/flyte_helm_generated.yaml @@ -804,22 +804,18 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8088 - name: grpc port: 81 protocol: TCP # intentionally set to TCP instead of grpc - appProtocol: TCP targetPort: 8089 - name: redoc protocol: TCP - appProtocol: TCP port: 87 targetPort: 8087 - name: http-metrics protocol: TCP - appProtocol: TCP port: 10254 selector: app.kubernetes.io/name: flyteadmin @@ -842,7 +838,6 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8080 selector: app.kubernetes.io/name: flyteconsole diff --git a/deployment/sandbox/flyte_helm_generated.yaml b/deployment/sandbox/flyte_helm_generated.yaml index 22b4855352..792496d0c9 100644 --- a/deployment/sandbox/flyte_helm_generated.yaml +++ b/deployment/sandbox/flyte_helm_generated.yaml @@ -6145,22 +6145,18 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8088 - name: grpc port: 81 protocol: TCP # intentionally set to TCP instead of grpc - appProtocol: TCP targetPort: 8089 - name: redoc protocol: TCP - appProtocol: TCP port: 87 targetPort: 8087 - name: http-metrics protocol: TCP - appProtocol: TCP port: 10254 selector: app.kubernetes.io/name: flyteadmin @@ -6183,7 +6179,6 @@ spec: - name: http port: 80 protocol: TCP - appProtocol: TCP targetPort: 8080 selector: app.kubernetes.io/name: flyteconsole diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index 6262b1640a..480592de00 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -517,6 +517,8 @@ data: - FLYTE_AWS_ENDPOINT: http://flyte-sandbox-minio.flyte:9000 - FLYTE_AWS_ACCESS_KEY_ID: minio - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage + - FLYTE_PLATFORM_URL: 'flyte-sandbox-grpc.flyte:8089' + - FLYTE_PLATFORM_INSECURE: true storage: signedURL: stowConfigOverride: @@ -819,7 +821,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: MUpQYWd3RWdMbE9rTmZ3Uw== + haSharedSecret: 
enNEdDR0dExVcnl4SzFwcQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1250,7 +1252,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: a823eaadac5f3a4358c8acf628ebeb3719f88312af520d2c253de2579dff262d + checksum/configuration: 73e77e790b0ce72a7f3f7a81e4ca3f279f33c1aaea5e5172f61959f3960b0d7b checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1416,7 +1418,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: c98dbe32a67a71797ba99a1e7f1c5d9ce159daba1ce9213f307de36307182e75 + checksum/secret: 85a71396ad8f3f7ac79cbc64439d42dd176edf59a41568fd1157f52343a2da42 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index 7d19b5321a..7a911cf873 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -499,6 +499,8 @@ data: - FLYTE_AWS_ENDPOINT: http://flyte-sandbox-minio.flyte:9000 - FLYTE_AWS_ACCESS_KEY_ID: minio - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage + - FLYTE_PLATFORM_URL: 'flyte-sandbox-grpc.flyte:8089' + - FLYTE_PLATFORM_INSECURE: true storage: signedURL: stowConfigOverride: @@ -801,7 +803,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: UVc1NklJQVV6SUJrdWx4bw== + haSharedSecret: b3pESlBJbFBGdjBHQjUzTQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1199,7 +1201,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: c2649df6bcb523f120c73b0fdeec5d9516f555eab12e4eae78b04dea2cf2abae + checksum/configuration: 956d7a4502a3f977583a7b1bf4da2e985d64c09c94f357798ce289efa25facd7 checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1365,7 +1367,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 8bb91d4715c3fa73d3ba293ae280b3712cdea39180b99789415d590927199169 + checksum/secret: beed8c901d51f3266dcdb7285b489264504d92a294a89b70320a4160fbbe3692 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index 90aa424daf..1b3903b752 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: QmRyWnpiSHhzcW1PNWxoMA== + haSharedSecret: STVTaU13c1FuYWJMSHlXRw== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 95c3da89befe34c739968c1c0f2fd8f7e204c84d2d7012364be5149bc00a22cf + checksum/secret: e742ce61227d4ebab3fc34efeea1c67bd9386a4d83b94c4e8c00c95ab9b592c8 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox/Dockerfile b/docker/sandbox/Dockerfile index 2a77a20472..40c3e83e83 100644 --- a/docker/sandbox/Dockerfile +++ b/docker/sandbox/Dockerfile @@ -52,13 +52,13 @@ COPY --from=base /flyteorg/ /flyteorg/ COPY docker/sandbox/flyte-entrypoint-default.sh 
/flyteorg/bin/flyte-entrypoint.sh ARG FLYTE_VERSION="latest" -ENV FLYTE_VERSION "${FLYTE_VERSION}" +ENV FLYTE_VERSION="${FLYTE_VERSION}" ARG FLYTE_TEST="release" -ENV FLYTE_TEST "${FLYTE_TEST}" +ENV FLYTE_TEST="${FLYTE_TEST}" # Update PATH variable -ENV PATH "/flyteorg/bin:${PATH}" +ENV PATH="/flyteorg/bin:${PATH}" # Declare volumes for k3s VOLUME /var/lib/kubelet diff --git a/docs/README.md b/docs/README.md index dc5cb7046c..576948df3a 100644 --- a/docs/README.md +++ b/docs/README.md @@ -24,6 +24,11 @@ This creates a new environment called `monodocs-env` with all the dependencies n In the `flyteorg/flyte` root directory make sure you have activated the `monodocs-env` (or whatever you called it) environment and do: +```bash +# need to set this to a fake value to build the docs locally +$ export DOCSEARCH_API_KEY=fake-api-key +``` + ```bash $ make docs ``` diff --git a/docs/community/contribute/contribute_code.rst b/docs/community/contribute/contribute_code.rst index c0cae7dade..21b05e20a2 100644 --- a/docs/community/contribute/contribute_code.rst +++ b/docs/community/contribute/contribute_code.rst @@ -25,12 +25,38 @@ To understand how the below components interact with each other, refer to :ref:` * - **Purpose**: Deployment, Documentation, and Issues * - **Languages**: RST -To build the Flyte docs locally you will need the following prerequisites: +In the ``flyteorg/flyte`` root directory you can run ``make dev-docs`` to build the documentation locally. The generated documentation will be in the ``docs/_build/html`` directory. +**Setup process** + +1. First, make sure you can run a linux/amd64 container +2. Run the following commands to build the documentation and serve it locally: + +.. prompt:: bash $ + + make dev-docs + python -m http.server --directory docs/_build/html + +3. Go to http://localhost:8000 to see the documentation. + +**Supported environment variables of** ``make dev-docs`` + +* ``DEV_DOCS_WATCH``: If set, the docs will be built and served using `sphinx-autobuild `__ for live updates. +* ``FLYTEKIT_LOCAL_PATH``: If set, the local path to flytekit will be used instead of the source code from the ``flyteorg/flytekit`` repo. +* ``FLYTECTL_LOCAL_PATH``: If set, the local path to flytectl will be used instead of the source code from the ``flyteorg/flytectl`` repo. +* ``FLYTESNACKS_LOCAL_PATH``: If set, the local path to flytesnacks will be used instead of the source code from the ``flyteorg/flytesnacks`` repo. + +For example, to use the local flytekit source code instead of the source code from the ``flyteorg/flytekit`` repo, run ``export FLYTEKIT_LOCAL_PATH=/path/to/flytekit`` before running ``make dev-docs``. + +**Alternative conda setup steps** + +* Install ``conda``. + * We recommend Miniconda installed with an `official installer `__. * Install `conda-lock `__. -* In the ``flyteorg/flyte`` root directory you can run: - * ``make dev-docs`` to build the documentation locally. The build will be in the ``docs/_build/html`` directory. See `the script `__ for additional environment variables that can be set. - * For example, to use the local flytekit source code instead of the source code from the flyteorg/flytekit repo, run ``export FLYTEKIT_LOCAL_PATH=/path/to/flytekit`` before running ``make dev-docs``.
+* In the ``flyteorg/flyte`` root directory run: + * ``conda-lock install --name monodocs-env monodocs-environment.lock.yaml`` + * ``conda activate monodocs-env`` + * ``pip install ./flyteidl`` ``flyteidl`` ************ @@ -267,7 +293,7 @@ that integrates all Flyte components into a single binary. # Step 4: Running the single binary. # The POD_NAMESPACE environment variable is necessary for the webhook to function correctly. # You may encounter an error due to `ERROR: duplicate key value violates unique constraint`. Running the command again will solve the problem. - POD_NAMESPACE=flyte ./flyte start --config flyte-single-binary-local.yaml + POD_NAMESPACE=flyte flyte start --config flyte-single-binary-local.yaml # All logs from flyteadmin, flyteplugins, flytepropeller, etc. will appear in the terminal. @@ -301,7 +327,7 @@ The following instructions provide guidance on how to build single binary with y # Step 3: Now, you can build the single binary. Go back to Flyte directory. make go-tidy make compile - POD_NAMESPACE=flyte ./flyte start --config flyte-single-binary-local.yaml + POD_NAMESPACE=flyte flyte start --config flyte-single-binary-local.yaml **5. Test by running a hello world workflow.** @@ -403,7 +429,7 @@ If not, we can start backends with a single command. Before running your workflow in the sandbox, make sure you're able to successfully run it locally. To deploy the workflow in the sandbox, you'll need to build a Flytekit image. Create a Dockerfile in your Flytekit directory with the minimum required configuration to run a task, as shown below. -If your task requires additional components, such as plugins, you may find it useful to refer to the construction of the `officail flitekit image `__ +If your task requires additional components, such as plugins, you may find it useful to refer to the construction of the `official flytekit image `__ .. code:: Dockerfile diff --git a/docs/community/contribute/contribute_docs.md b/docs/community/contribute/contribute_docs.md index f97152032b..7c1d47ffc8 100644 --- a/docs/community/contribute/contribute_docs.md +++ b/docs/community/contribute/contribute_docs.md @@ -82,14 +82,14 @@ The following are some tips to include various content: * **Source code references (Embedded format)**
`.rst` example: ```{code-block} - .. rli:: https://raw.githubusercontent.com/flyteorg/// + .. literalinclude:: /examples/ :lines: - ``` `.md` example: ````{code-block} - ```{rli} https://raw.githubusercontent.com/flyteorg/// - lines: - + ```{literalinclude} /examples/ + :lines: - ``` ```` diff --git a/docs/community/troubleshoot.rst b/docs/community/troubleshoot.rst index 692e3c2aed..2a1b620515 100644 --- a/docs/community/troubleshoot.rst +++ b/docs/community/troubleshoot.rst @@ -176,3 +176,34 @@ Please add ``spark`` to the list of `enabled-plugins` in the config yaml file. F default-for-task-types: - container: container - container_array: K8S-ARRAY + +``authentication handshake failed: x509: "Kubernetes Ingress Controller Fake Certificate" certificate is not trusted"`` when deploying flyte-core to your own kubernetes cluster +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This issue is caused by TLS being disabled in your Kubernetes cluster. You can resolve the problem by following these steps: + +- Enable ``tls`` in the ``values.yaml`` ingress configuration of flyte-core in order to expose gRPC service at 443 port: + +.. code-block:: yaml + + ingress: + host: + separateGrpcIngress: true + separateGrpcIngressAnnotations: + : "grpc" + annotations: + : "/console" + : "/console" + : haproxy + tls: + enabled: true # enable tls + +- Disable ``insecure`` in your ``flytectl`` client ``config.yaml``: + +.. code-block:: yaml + + admin: + endpoint: dns:///example.com + authType: Pkce + insecure: false # disable insecure in flytectl + insecureSkipVerify: true diff --git a/docs/conf.py b/docs/conf.py index 316acc60be..83fca407b7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -343,6 +343,22 @@ "flytesnacks/README.md", "flytekit/**/README.md", "flytekit/_templates/**", + "examples/advanced_composition/**", + "examples/basics/**", + "examples/customizing_dependencies/**", + "examples/data_types_and_io/**", + "examples/development_lifecycle/**", + "examples/extending/**", + "examples/productionizing/**", + "examples/testing/**", + "flytesnacks/examples/advanced_composition/*.md", + "flytesnacks/examples/basics/*.md", + "flytesnacks/examples/customizing_dependencies/*.md", + "flytesnacks/examples/data_types_and_io/*.md", + "flytesnacks/examples/development_lifecycle/*.md", + "flytesnacks/examples/extending/*.md", + "flytesnacks/examples/productionizing/*.md", + "flytesnacks/examples/testing/*.md", "api/flytectl/index.rst", "protos/boilerplate/**", "protos/tmp/**", @@ -622,14 +638,6 @@ "flytesnacks/_build", "flytesnacks/_tags", "flytesnacks/index.md", - "examples/advanced_composition", - "examples/basics", - "examples/customizing_dependencies", - "examples/data_types_and_io", - "examples/development_lifecycle", - "examples/extending", - "examples/productionizing", - "examples/testing" ] ], "local": flytesnacks_local_path is not None, @@ -690,6 +698,15 @@ # Disable warnings from tensorflow os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" +# Define the canonical URL if you are using a custom domain on Read the Docs +html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "") + +# Tell Jinja2 templates the build is running on Read the Docs +if os.environ.get("READTHEDOCS", "") == "True": + if "html_context" not in globals(): + html_context = {} + html_context["READTHEDOCS"] = True + class CustomWarningSuppressor(logging.Filter): """Filter logs by `suppress_warnings`.""" diff --git 
a/docs/deployment/deployment/cloud_simple.rst b/docs/deployment/deployment/cloud_simple.rst index 94833a0db4..e6a6802758 100644 --- a/docs/deployment/deployment/cloud_simple.rst +++ b/docs/deployment/deployment/cloud_simple.rst @@ -29,9 +29,11 @@ these prerequisites. .. note:: - `Union.AI `__ plans to open-source a reference - implementation of these requirements for the major cloud providers in early - 2023. + + `Union.ai `__ maintains a `set of Terraform scripts `__ that automate the configuration + of prerequisites and Flyte installation on AWS, GCP, or Azure. + + A community-maintained guide to manually prepare an EKS environment and deploy Flyte is available `here `__ *************** Installation diff --git a/docs/user_guide/advanced_composition/chaining_flyte_entities.md b/docs/user_guide/advanced_composition/chaining_flyte_entities.md index 4fb12a4149..3a6f85a725 100644 --- a/docs/user_guide/advanced_composition/chaining_flyte_entities.md +++ b/docs/user_guide/advanced_composition/chaining_flyte_entities.md @@ -17,7 +17,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte Let's establish a sequence where `t1()` occurs after `t0()`, and `t2()` follows `t1()`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/chain_entities.py +```{literalinclude} /examples/advanced_composition/advanced_composition/chain_entities.py :caption: advanced_composition/chain_entities.py :lines: 1-30 ``` @@ -27,7 +27,7 @@ Let's establish a sequence where `t1()` occurs after `t0()`, and `t2()` follows Just like tasks, you can chain {ref}`subworkflows `. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/chain_entities.py +```{literalinclude} /examples/advanced_composition/advanced_composition/chain_entities.py :caption: advanced_composition/chain_entities.py :lines: 34-49 ``` diff --git a/docs/user_guide/advanced_composition/conditionals.md b/docs/user_guide/advanced_composition/conditionals.md index 27fb05357b..84d21bb300 100644 --- a/docs/user_guide/advanced_composition/conditionals.md +++ b/docs/user_guide/advanced_composition/conditionals.md @@ -18,7 +18,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the necessary libraries. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :lines: 1-4 ``` @@ -29,7 +29,7 @@ In this example, we introduce two tasks, `calculate_circle_circumference` and `calculate_circle_area`. The workflow dynamically chooses between these tasks based on whether the input falls within the fraction range (0-1) or not. 
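The `conditional.py` source is now pulled in by `{literalinclude}` from the local `examples` tree, so the branching code itself is not visible in this diff. A minimal sketch of the construct just described, assuming flytekit's public `conditional` builder and reusing the task names mentioned above (the task bodies and thresholds are illustrative, not the pinned flytesnacks code):

```python
from flytekit import conditional, task, workflow


@task
def calculate_circle_circumference(radius: float) -> float:
    return 2 * 3.14 * radius


@task
def calculate_circle_area(radius: float) -> float:
    return 3.14 * radius * radius


@workflow
def shape_properties(radius: float) -> float:
    # Every conditional must cover all branches, hence the trailing else_()
    return (
        conditional("shape_properties")
        .if_((radius >= 0.1) & (radius < 1.0))
        .then(calculate_circle_circumference(radius=radius))
        .else_()
        .then(calculate_circle_area(radius=radius))
    )


if __name__ == "__main__":
    # Runs locally; 0.5 falls in the fraction range, so the circumference branch executes
    print(shape_properties(radius=0.5))
```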
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :lines: 12-38 ``` @@ -40,7 +40,7 @@ We establish an `if` condition with multiple branches, which will result in a fa It's important to note that any `conditional` statement in Flyte is expected to be complete, meaning that all possible branches must be accounted for. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :pyobject: shape_properties_with_multiple_branches ``` @@ -55,7 +55,7 @@ a convention also observed in other libraries. ## Consuming the output of a conditional Here, we write a task that consumes the output returned by a `conditional`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :lines: 67-85 ``` @@ -66,7 +66,7 @@ You can check if a boolean returned from the previous task is `True`, but unary operations are not supported directly. Instead, use the `is_true`, `is_false` and `is_none` methods on the result. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :lines: 93-123 ``` @@ -79,7 +79,7 @@ Inputs and outputs are automatically encapsulated in a special object known as { ## Using boolean workflow inputs in a conditional You can directly pass a boolean to a workflow. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :pyobject: boolean_input_wf ``` @@ -92,7 +92,7 @@ This special object enables it to exhibit additional behavior. You can run the workflows locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :lines: 133-139 ``` @@ -102,7 +102,7 @@ You can run the workflows locally as follows: You can nest conditional sections arbitrarily inside other conditional sections. However, these nested sections can only be in the `then` part of a `conditional` block. 
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :lines: 146-168 ``` @@ -112,14 +112,14 @@ However, these nested sections can only be in the `then` part of a `conditional` Let's write a fun workflow that triggers the `calculate_circle_circumference` task in the event of a "heads" outcome, and alternatively, runs the `calculate_circle_area` task in the event of a "tail" outcome. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :pyobject: consume_task_output ``` You can run the workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :lines: 216-225 ``` @@ -138,7 +138,7 @@ task-plugins: ``` ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py +```{literalinclude} /examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :lines: 200-212 ``` diff --git a/docs/user_guide/advanced_composition/decorating_tasks.md b/docs/user_guide/advanced_composition/decorating_tasks.md index 6b39c9f363..7165703798 100644 --- a/docs/user_guide/advanced_composition/decorating_tasks.md +++ b/docs/user_guide/advanced_composition/decorating_tasks.md @@ -17,14 +17,14 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the required dependencies. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_tasks.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_tasks.py :caption: advanced_composition/decorating_tasks.py :lines: 1-4 ``` Create a logger to monitor the execution's progress. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_tasks.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_tasks.py :caption: advanced_composition/decorating_tasks.py :lines: 7 ``` @@ -33,7 +33,7 @@ Create a logger to monitor the execution's progress. We define a decorator that logs the input and output details for a decorated task. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_tasks.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_tasks.py :caption: advanced_composition/decorating_tasks.py :pyobject: log_io ``` @@ -44,7 +44,7 @@ We create a task named `t1` that is decorated with `log_io`. The order of invoking the decorators is important. 
`@task` should always be the outer-most decorator. ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_tasks.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_tasks.py :caption: advanced_composition/decorating_tasks.py :pyobject: t1 ``` @@ -58,7 +58,7 @@ You can also stack multiple decorators on top of each other as long as `@task` i We define a decorator that verifies if the output from the decorated function is a positive number before it's returned. If this assumption is violated, it raises a `ValueError` exception. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_tasks.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_tasks.py :caption: advanced_composition/decorating_tasks.py :pyobject: validate_output ``` @@ -69,14 +69,14 @@ The output of the `validate_output` task uses {py:func}`~functools.partial` to i We define a function that uses both the logging and validator decorators. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_tasks.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_tasks.py :caption: advanced_composition/decorating_tasks.py :pyobject: t2 ``` Finally, we compose a workflow that calls `t1` and `t2`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_tasks.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_tasks.py :caption: advanced_composition/decorating_tasks.py :lines: 53-59 ``` diff --git a/docs/user_guide/advanced_composition/decorating_workflows.md b/docs/user_guide/advanced_composition/decorating_workflows.md index 751cd6a95c..ee5f02c8a3 100644 --- a/docs/user_guide/advanced_composition/decorating_workflows.md +++ b/docs/user_guide/advanced_composition/decorating_workflows.md @@ -23,7 +23,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the necessary libraries. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_workflows.py :caption: advanced_composition/decorating_workflows.py :lines: 1-6 ``` @@ -32,7 +32,7 @@ Let's define the tasks we need for setup and teardown. In this example, we use t {py:class}`unittest.mock.MagicMock` class to create a fake external service that we want to initialize at the beginning of our workflow and finish at the end. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_workflows.py :caption: advanced_composition/decorating_workflows.py :lines: 9-21 ``` @@ -45,7 +45,7 @@ external service and Flyte. We create a decorator that we want to use to wrap our workflow function. 
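Both this page and the task-decorator discussion above wrap user functions with ordinary Python decorators, but the snippets are only referenced by path. A hedged sketch of the simpler task-decorator case (a `log_io` decorator as described above; the task body is illustrative):

```python
import logging
from functools import wraps

from flytekit import task, workflow

logger = logging.getLogger(__file__)


def log_io(fn):
    @wraps(fn)  # preserves the signature so flytekit can still infer the task interface
    def wrapper(*args, **kwargs):
        logger.info("task %s called with args=%s kwargs=%s", fn.__name__, args, kwargs)
        out = fn(*args, **kwargs)
        logger.info("task %s produced output: %s", fn.__name__, out)
        return out

    return wrapper


@task
@log_io  # @task must remain the outer-most decorator
def t1(x: int) -> int:
    return x + 1


@workflow
def decorating_task_wf(x: int = 10) -> int:
    return t1(x=x)
```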
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_workflows.py :caption: advanced_composition/decorating_workflows.py :pyobject: setup_teardown ``` @@ -66,14 +66,14 @@ There are a few key pieces to note in the `setup_teardown` decorator above: We define two tasks that will constitute the workflow. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_workflows.py :caption: advanced_composition/decorating_workflows.py :lines: 63-70 ``` And then create our decorated workflow: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/decorating_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/decorating_workflows.py :caption: advanced_composition/decorating_workflows.py :lines: 74-82 ``` diff --git a/docs/user_guide/advanced_composition/dynamic_workflows.md b/docs/user_guide/advanced_composition/dynamic_workflows.md index 949d88a847..534e059545 100644 --- a/docs/user_guide/advanced_composition/dynamic_workflows.md +++ b/docs/user_guide/advanced_composition/dynamic_workflows.md @@ -40,28 +40,28 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, we import the required libraries. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/dynamic_workflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/dynamic_workflow.py :caption: advanced_composition/dynamic_workflow.py :lines: 1 ``` We define a task that returns the index of a character, where A-Z/a-z is equivalent to 0-25. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/dynamic_workflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/dynamic_workflow.py :caption: advanced_composition/dynamic_workflow.py :pyobject: return_index ``` We also create a task that prepares a list of 26 characters by populating the frequency of each character. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/dynamic_workflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/dynamic_workflow.py :caption: advanced_composition/dynamic_workflow.py :pyobject: update_list ``` We define a task to calculate the number of common characters between the two strings. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/dynamic_workflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/dynamic_workflow.py :caption: advanced_composition/dynamic_workflow.py :pyobject: derive_count ``` @@ -75,7 +75,7 @@ We define a dynamic workflow to accomplish the following: The looping process is contingent on the number of characters in both strings, which is unknown until runtime. 
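The `dynamic_workflow.py` example is likewise included by path only. A rough sketch of the `@dynamic` pattern it illustrates, looping over inputs whose size is only known at run time, might look like the following (the task names here are invented for illustration):

```python
import typing

from flytekit import dynamic, task, workflow


@task
def to_upper(s: str) -> str:
    return s.upper()


@task
def join_words(parts: typing.List[str]) -> str:
    return " ".join(parts)


@dynamic
def upper_all(strings: typing.List[str]) -> str:
    # The loop bound is only known at run time, which is why this is a dynamic workflow
    results = []
    for s in strings:
        results.append(to_upper(s=s))  # each call returns a promise
    return join_words(parts=results)  # promises are materialized when join_words runs


@workflow
def dynamic_wf(strings: typing.List[str] = ["hello", "world"]) -> str:
    return upper_all(strings=strings)
```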
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/dynamic_workflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/dynamic_workflow.py :caption: advanced_composition/dynamic_workflow.py :pyobject: count_characters ``` @@ -97,14 +97,14 @@ Local execution works when a `@dynamic` decorator is used because Flytekit treat Define a workflow that triggers the dynamic workflow. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/dynamic_workflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/dynamic_workflow.py :caption: advanced_composition/dynamic_workflow.py :pyobject: dynamic_wf ``` You can run the workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/dynamic_workflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/dynamic_workflow.py :caption: advanced_composition/dynamic_workflow.py :lines: 78-79 ``` diff --git a/docs/user_guide/advanced_composition/eager_workflows.md b/docs/user_guide/advanced_composition/eager_workflows.md index 9bf3e019c8..4b83679473 100644 --- a/docs/user_guide/advanced_composition/eager_workflows.md +++ b/docs/user_guide/advanced_composition/eager_workflows.md @@ -45,7 +45,7 @@ using the `@eager` decorator. To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :lines: 1-21 ``` @@ -116,7 +116,7 @@ One of the biggest benefits of eager workflows is that you can now materialize task and subworkflow outputs as Python values and do operations on them just like you would in any other Python function. Let's look at another example: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :pyobject: another_eager_workflow ``` @@ -131,7 +131,7 @@ As you saw in the `simple_eager_workflow` workflow above, you can use regular Python conditionals in your eager workflows. 
Let's look at a more complicated example: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :lines: 36-53 ``` @@ -144,7 +144,7 @@ to check if `out` is negative, but we're also using the `gt_100` task in the You can also gather the outputs of multiple tasks or subworkflows into a list: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :lines: 58-69 ``` @@ -153,7 +153,7 @@ You can also gather the outputs of multiple tasks or subworkflows into a list: You can also invoke static workflows from within an eager workflow: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :lines: 74-84 ``` @@ -162,7 +162,7 @@ You can also invoke static workflows from within an eager workflow: You can have nest eager subworkflows inside a parent eager workflow: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :lines: 89-97 ``` @@ -171,7 +171,7 @@ You can have nest eager subworkflows inside a parent eager workflow: You can also catch exceptions in eager workflows through `EagerException`: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :lines: 102-117 ``` @@ -195,7 +195,7 @@ and remotely. You can execute eager workflows locally by simply calling them like a regular `async` function: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :lines: 123-125 ``` @@ -244,7 +244,7 @@ When using a sandbox cluster started with `flytectl demo start`, however, the `client_secret_group` and `client_secret_key` are not required, since the default sandbox configuration does not require key-based authentication. 
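Because every eager-workflow snippet on this page is included by path, a minimal sketch of the `@eager` pattern itself may help. It assumes the decorator is still importable from `flytekit.experimental`, as in the pinned flytesnacks revision, and the task bodies are illustrative:

```python
from flytekit import task
from flytekit.experimental import eager


@task
def add_one(x: int) -> int:
    return x + 1


@task
def double(x: int) -> int:
    return x * 2


@eager
async def simple_eager_wf(x: int) -> int:
    out = await add_one(x=x)  # the task output is materialized as a real int
    if out < 0:  # plain Python control flow is allowed here
        return -1
    return await double(x=out)
```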
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/eager_workflows.py +```{literalinclude} /examples/advanced_composition/advanced_composition/eager_workflows.py :caption: advanced_composition/eager_workflows.py :lines: 130-145 ``` diff --git a/docs/user_guide/advanced_composition/intratask_checkpoints.md b/docs/user_guide/advanced_composition/intratask_checkpoints.md index d856a45714..81631e40e7 100644 --- a/docs/user_guide/advanced_composition/intratask_checkpoints.md +++ b/docs/user_guide/advanced_composition/intratask_checkpoints.md @@ -51,14 +51,14 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the necessary libraries and set the number of task retries to `3`: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/checkpoint.py +```{literalinclude} /examples/advanced_composition/advanced_composition/checkpoint.py :caption: advanced_composition/checkpoint.py :lines: 1-4 ``` We define a task to iterate precisely `n_iterations`, checkpoint its state, and recover from simulated failures: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/checkpoint.py +```{literalinclude} /examples/advanced_composition/advanced_composition/checkpoint.py :caption: advanced_composition/checkpoint.py :pyobject: use_checkpoint ``` @@ -69,14 +69,14 @@ The checkpoint system offers additional APIs, documented in the code accessible Create a workflow that invokes the task: The task will automatically undergo retries in the event of a {ref}`FlyteRecoverableException `. 
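The checkpointing code is also included by path; a hedged sketch of the intratask-checkpoint pattern follows. The simulated failure is illustrative: on a Flyte cluster the retry resumes from the last written checkpoint, while local runs do not retry.

```python
from flytekit import current_context, task, workflow
from flytekit.exceptions.user import FlyteRecoverableException


@task(retries=3)
def use_checkpoint(n_iterations: int) -> int:
    cp = current_context().checkpoint
    prev = cp.read()  # bytes written by a previous attempt, if any
    start = int(prev.decode()) if prev else 0

    for i in range(start, n_iterations):
        if i == n_iterations // 2 and start == 0:
            # Simulate a recoverable failure on the first attempt only
            raise FlyteRecoverableException(f"failing at iteration {i} to trigger a retry")
        cp.write(f"{i + 1}".encode())  # persist progress after every iteration
    return n_iterations


@workflow
def checkpointing_example(n_iterations: int = 10) -> int:
    return use_checkpoint(n_iterations=n_iterations)
```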
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/checkpoint.py +```{literalinclude} /examples/advanced_composition/advanced_composition/checkpoint.py :caption: advanced_composition/checkpoint.py :pyobject: checkpointing_example ``` The local checkpoint is not utilized here because retries are not supported: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/checkpoint.py +```{literalinclude} /examples/advanced_composition/advanced_composition/checkpoint.py :caption: advanced_composition/checkpoint.py :lines: 37-42 ``` diff --git a/docs/user_guide/advanced_composition/map_tasks.md b/docs/user_guide/advanced_composition/map_tasks.md index 26330a8cd5..f73946cd89 100644 --- a/docs/user_guide/advanced_composition/map_tasks.md +++ b/docs/user_guide/advanced_composition/map_tasks.md @@ -23,14 +23,14 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the required libraries: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/map_task.py +```{literalinclude} /examples/advanced_composition/advanced_composition/map_task.py :caption: advanced_composition/map_task.py :lines: 1 ``` Here's a simple workflow that uses {py:func}`map_task `: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/map_task.py +```{literalinclude} /examples/advanced_composition/advanced_composition/map_task.py :caption: advanced_composition/map_task.py :lines: 4-19 ``` @@ -82,7 +82,7 @@ When defining a map task, avoid calling other tasks in it. Flyte can't accuratel In this example, the map task `suboptimal_mappable_task` would not give you the best performance: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/map_task.py +```{literalinclude} /examples/advanced_composition/advanced_composition/map_task.py :caption: advanced_composition/map_task.py :lines: 31-40 ``` @@ -98,7 +98,7 @@ You might need to map a task with multiple inputs. For instance, consider a task that requires three inputs: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/map_task.py +```{literalinclude} /examples/advanced_composition/advanced_composition/map_task.py :caption: advanced_composition/map_task.py :pyobject: multi_input_task ``` @@ -107,21 +107,21 @@ You may want to map this task with only the ``quantity`` input, while keeping th Since a map task accepts only one input, you can achieve this by partially binding values to the map task. 
This can be done using the {py:func}`functools.partial` function: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/map_task.py +```{literalinclude} /examples/advanced_composition/advanced_composition/map_task.py :caption: advanced_composition/map_task.py :lines: 52-58 ``` Another possibility is to bind the outputs of a task to partials: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/map_task.py +```{literalinclude} /examples/advanced_composition/advanced_composition/map_task.py :caption: advanced_composition/map_task.py :lines: 63-72 ``` You can also provide multiple lists as input to a `map_task`: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/map_task.py +```{literalinclude} /examples/advanced_composition/advanced_composition/map_task.py :caption: advanced_composition/map_task.py :pyobject: map_workflow_with_lists ``` diff --git a/docs/user_guide/advanced_composition/subworkflows.md b/docs/user_guide/advanced_composition/subworkflows.md index 08a4bbb8d4..14d3cc1006 100644 --- a/docs/user_guide/advanced_composition/subworkflows.md +++ b/docs/user_guide/advanced_composition/subworkflows.md @@ -24,7 +24,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte Here's an example illustrating the calculation of slope, intercept and the corresponding y-value: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/subworkflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/subworkflow.py :caption: advanced_composition/subworkflow.py :lines: 1-35 ``` @@ -34,7 +34,7 @@ Subsequently, the `regression_line_wf` triggers `slope_intercept_wf` and then co To execute the workflow locally, use the following: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/subworkflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/subworkflow.py :caption: advanced_composition/subworkflow.py :lines: 39-40 ``` @@ -43,14 +43,14 @@ It's possible to nest a workflow that contains a subworkflow within another work Workflows can be easily constructed from other workflows, even if they function as standalone entities. 
Each workflow in this module has the capability to exist and run independently: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/subworkflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/subworkflow.py :caption: advanced_composition/subworkflow.py :pyobject: nested_regression_line_wf ``` You can run the nested workflow locally as well: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/subworkflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/subworkflow.py :caption: advanced_composition/subworkflow.py :lines: 52-53 ``` @@ -71,7 +71,7 @@ external workflows may offer a way to distribute the workload of a workflow acro Here's an example that illustrates the concept of external workflows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/subworkflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/subworkflow.py :caption: advanced_composition/subworkflow.py :lines: 61-71 ``` @@ -85,7 +85,7 @@ In the console screenshot above, note that the launch plan execution ID differs You can run a workflow containing an external workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/subworkflow.py +```{literalinclude} /examples/advanced_composition/advanced_composition/subworkflow.py :caption: advanced_composition/subworkflow.py :lines: 75-76 ``` diff --git a/docs/user_guide/advanced_composition/waiting_for_external_inputs.md b/docs/user_guide/advanced_composition/waiting_for_external_inputs.md index 0d3a2aae28..edeb6e2b95 100644 --- a/docs/user_guide/advanced_composition/waiting_for_external_inputs.md +++ b/docs/user_guide/advanced_composition/waiting_for_external_inputs.md @@ -43,7 +43,7 @@ your workflow to mock out the behavior of some long-running computation. To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py +```{literalinclude} /examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py :caption: advanced_composition/waiting_for_external_inputs.py :lines: 1-20 ``` @@ -75,7 +75,7 @@ but before publishing it you want to give it a custom title. You can achieve this by defining a `wait_for_input` node that takes a `str` input and finalizes the report: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py +```{literalinclude} /examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py :caption: advanced_composition/waiting_for_external_inputs.py :lines: 24-49 ``` @@ -107,7 +107,7 @@ an explicit approval signal before continuing execution. Going back to our report-publishing use case, suppose that we want to block the publishing of a report for some reason (e.g. 
if they don't appear to be valid): -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py +```{literalinclude} /examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py :caption: advanced_composition/waiting_for_external_inputs.py :lines: 53-64 ``` @@ -120,7 +120,7 @@ You can also use the output of the `approve` function as a promise, feeding it to a subsequent task. Let's create a version of our report-publishing workflow where the approval happens after `create_report`: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py +```{literalinclude} /examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py :caption: advanced_composition/waiting_for_external_inputs.py :pyobject: approval_as_promise_wf ``` @@ -133,7 +133,7 @@ useful when we combine them with other Flyte constructs, like {ref}`conditionals To illustrate this, let's extend the report-publishing use case so that we produce an "invalid report" output in case we don't approve the final report: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py +```{literalinclude} /examples/advanced_composition/advanced_composition/waiting_for_external_inputs.py :caption: advanced_composition/waiting_for_external_inputs.py :lines: 88-114 ``` diff --git a/docs/user_guide/basics/documenting_workflows.md b/docs/user_guide/basics/documenting_workflows.md index 9f5e20d5fb..954bd35302 100644 --- a/docs/user_guide/basics/documenting_workflows.md +++ b/docs/user_guide/basics/documenting_workflows.md @@ -15,14 +15,14 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the relevant libraries: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/documenting_workflows.py +```{literalinclude} /examples/basics/basics/documenting_workflows.py :caption: basics/documenting_workflows.py :lines: 1-3 ``` We import the `slope` and `intercept` tasks from the `workflow.py` file. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/documenting_workflows.py +```{literalinclude} /examples/basics/basics/documenting_workflows.py :caption: basics/documenting_workflows.py :lines: 6 ``` @@ -35,7 +35,7 @@ The initial section of the docstring provides a concise overview of the workflow The subsequent section provides a comprehensive explanation. The last part of the docstring outlines the parameters and return type. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/documenting_workflows.py +```{literalinclude} /examples/basics/basics/documenting_workflows.py :caption: basics/documenting_workflows.py :pyobject: sphinx_docstring_wf ``` @@ -49,7 +49,7 @@ The next section offers a comprehensive description. The third section of the docstring details all parameters along with their respective data types. The final section of the docstring explains the return type and its associated data type. 
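As the docstring examples are included by path, a short sketch of a Sphinx-style docstring on a workflow is shown below; the slope computation is simplified for illustration and is not the flytesnacks implementation.

```python
import typing

from flytekit import task, workflow


@task
def slope(x: typing.List[int], y: typing.List[int]) -> float:
    # Ordinary least-squares slope
    n = len(x)
    sum_xy = sum(xi * yi for xi, yi in zip(x, y))
    sum_x, sum_y = sum(x), sum(y)
    sum_x_squared = sum(xi**2 for xi in x)
    return (n * sum_xy - sum_x * sum_y) / (n * sum_x_squared - sum_x**2)


@workflow
def sphinx_docstring_wf(x: typing.List[int] = [-3, 0, 3], y: typing.List[int] = [7, 4, -2]) -> float:
    """
    Slope of a regression line.

    This workflow accepts lists of x and y values and returns the slope of the
    line fitted to them; the docstring is what Flyte surfaces as the entity's description.

    :param x: list of x values
    :param y: list of y values
    :return: slope of the regression line
    """
    return slope(x=x, y=y)
```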
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/documenting_workflows.py +```{literalinclude} /examples/basics/basics/documenting_workflows.py :caption: basics/documenting_workflows.py :pyobject: numpy_docstring_wf ``` @@ -63,7 +63,7 @@ The subsequent section of the docstring provides an extensive explanation. The third segment of the docstring outlines the parameters and return type, including their respective data types. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/documenting_workflows.py +```{literalinclude} /examples/basics/basics/documenting_workflows.py :caption: basics/documenting_workflows.py :pyobject: google_docstring_wf ``` diff --git a/docs/user_guide/basics/hello_world.md b/docs/user_guide/basics/hello_world.md index a63b175fbe..d310a80273 100644 --- a/docs/user_guide/basics/hello_world.md +++ b/docs/user_guide/basics/hello_world.md @@ -17,7 +17,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import `task` and `workflow` from the `flytekit` library: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/hello_world.py +```{literalinclude} /examples/basics/basics/hello_world.py :caption: basics/hello_world.py :lines: 1 ``` @@ -25,7 +25,7 @@ To begin, import `task` and `workflow` from the `flytekit` library: Define a task that produces the string "Hello, World!". Simply using the `@task` decorator to annotate the Python function: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/hello_world.py +```{literalinclude} /examples/basics/basics/hello_world.py :caption: basics/hello_world.py :pyobject: say_hello ``` @@ -33,14 +33,14 @@ Simply using the `@task` decorator to annotate the Python function: You can handle the output of a task in the same way you would with a regular Python function. 
Store the output in a variable and use it as a return value for a Flyte workflow: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/hello_world.py +```{literalinclude} /examples/basics/basics/hello_world.py :caption: basics/hello_world.py :pyobject: hello_world_wf ``` Run the workflow by simply calling it like a Python function: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/hello_world.py +```{literalinclude} /examples/basics/basics/hello_world.py :caption: basics/hello_world.py :lines: 19-20 ``` diff --git a/docs/user_guide/basics/imperative_workflows.md b/docs/user_guide/basics/imperative_workflows.md index db0e3a5ee0..e6d189c1ba 100644 --- a/docs/user_guide/basics/imperative_workflows.md +++ b/docs/user_guide/basics/imperative_workflows.md @@ -23,28 +23,28 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the necessary dependencies: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/imperative_workflow.py +```{literalinclude} /examples/basics/basics/imperative_workflow.py :caption: basics/imperative_workflow.py :lines: 1 ``` We import the `slope` and `intercept` tasks from the `workflow.py` file: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/imperative_workflow.py +```{literalinclude} /examples/basics/basics/imperative_workflow.py :caption: basics/imperative_workflow.py :lines: 4 ``` Create an imperative workflow: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/imperative_workflow.py +```{literalinclude} /examples/basics/basics/imperative_workflow.py :caption: basics/imperative_workflow.py :lines: 7 ``` Add the workflow inputs to the imperative workflow: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/imperative_workflow.py +```{literalinclude} /examples/basics/basics/imperative_workflow.py :caption: basics/imperative_workflow.py :lines: 11-12 ``` @@ -56,21 +56,21 @@ you can create a {ref}`launch plan `. 
Add the tasks that need to be triggered from within the workflow: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/imperative_workflow.py +```{literalinclude} /examples/basics/basics/imperative_workflow.py :caption: basics/imperative_workflow.py :lines: 16-19 ``` Lastly, add the workflow output: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/imperative_workflow.py +```{literalinclude} /examples/basics/basics/imperative_workflow.py :caption: basics/imperative_workflow.py :lines: 23 ``` You can execute the workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/imperative_workflow.py +```{literalinclude} /examples/basics/basics/imperative_workflow.py :caption: basics/imperative_workflow.py :lines: 27-28 ``` diff --git a/docs/user_guide/basics/launch_plans.md b/docs/user_guide/basics/launch_plans.md index 63ace47a70..6f1f983ba6 100644 --- a/docs/user_guide/basics/launch_plans.md +++ b/docs/user_guide/basics/launch_plans.md @@ -27,56 +27,56 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the necessary libraries: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/launch_plan.py +```{literalinclude} /examples/basics/basics/launch_plan.py :caption: basics/launch_plan.py :lines: 1 ``` We import the workflow from the `workflow.py` file for which we're going to create a launch plan: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/launch_plan.py +```{literalinclude} /examples/basics/basics/launch_plan.py :caption: basics/launch_plan.py :lines: 5 ``` Create a default launch plan with no inputs during serialization: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/launch_plan.py +```{literalinclude} /examples/basics/basics/launch_plan.py :caption: basics/launch_plan.py :lines: 8 ``` You can run the launch plan locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/launch_plan.py +```{literalinclude} /examples/basics/basics/launch_plan.py :caption: basics/launch_plan.py :lines: 11 ``` Create a launch plan and specify the default inputs: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/launch_plan.py +```{literalinclude} /examples/basics/basics/launch_plan.py :caption: basics/launch_plan.py :lines: 14-16 ``` You can trigger the launch plan locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/launch_plan.py +```{literalinclude} /examples/basics/basics/launch_plan.py :caption: basics/launch_plan.py :lines: 19 ``` You can override the defaults as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/launch_plan.py +```{literalinclude} /examples/basics/basics/launch_plan.py :caption: basics/launch_plan.py :lines: 22 ``` It's possible to lock launch plan inputs, preventing them from being overridden during execution: 
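Since `launch_plan.py` is included by path, a condensed sketch of the `LaunchPlan` API discussed above may be useful; the workflow, names, and values are illustrative.

```python
from flytekit import LaunchPlan, task, workflow


@task
def square(x: int) -> int:
    return x * x


@workflow
def square_wf(x: int) -> int:
    return square(x=x)


# A launch plan whose default input can still be overridden at execution time
default_lp = LaunchPlan.get_or_create(
    workflow=square_wf, name="square_lp_default", default_inputs={"x": 3}
)

# A launch plan whose input is fixed and cannot be overridden
fixed_lp = LaunchPlan.get_or_create(
    workflow=square_wf, name="square_lp_fixed", fixed_inputs={"x": 3}
)

if __name__ == "__main__":
    print(default_lp())     # uses the default input -> 9
    print(default_lp(x=5))  # default overridden -> 25
```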
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/launch_plan.py +```{literalinclude} /examples/basics/basics/launch_plan.py :caption: basics/launch_plan.py :lines: 25-27 ``` diff --git a/docs/user_guide/basics/named_outputs.md b/docs/user_guide/basics/named_outputs.md index 00b9160997..1d274c88b6 100644 --- a/docs/user_guide/basics/named_outputs.md +++ b/docs/user_guide/basics/named_outputs.md @@ -22,21 +22,21 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the required dependencies: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/named_outputs.py +```{literalinclude} /examples/basics/basics/named_outputs.py :caption: basics/named_outputs.py :lines: 1-3 ``` Define a `NamedTuple` and assign it as an output to a task: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/named_outputs.py +```{literalinclude} /examples/basics/basics/named_outputs.py :caption: basics/named_outputs.py :lines: 6-14 ``` Likewise, assign a `NamedTuple` to the output of `intercept` task: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/named_outputs.py +```{literalinclude} /examples/basics/basics/named_outputs.py :caption: basics/named_outputs.py :lines: 18-26 ``` @@ -59,14 +59,14 @@ Remember that we are extracting individual task execution outputs by dereferenci This is necessary because `NamedTuple`s function as tuples and require this dereferencing: ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/named_outputs.py +```{literalinclude} /examples/basics/basics/named_outputs.py :caption: basics/named_outputs.py :lines: 32-39 ``` You can run the workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/named_outputs.py +```{literalinclude} /examples/basics/basics/named_outputs.py :caption: basics/named_outputs.py :lines: 43-44 ``` diff --git a/docs/user_guide/basics/shell_tasks.md b/docs/user_guide/basics/shell_tasks.md index 8680b87f5d..e1f8b23bef 100644 --- a/docs/user_guide/basics/shell_tasks.md +++ b/docs/user_guide/basics/shell_tasks.md @@ -15,7 +15,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte First, import the necessary libraries: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/shell_task.py +```{literalinclude} /examples/basics/basics/shell_task.py :caption: basics/shell_task.py :lines: 1-8 ``` @@ -24,7 +24,7 @@ With the required imports in place, you can proceed to define a shell task. To create a shell task, provide a name for it, specify the bash script to be executed, and define inputs and outputs if needed: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/shell_task.py +```{literalinclude} /examples/basics/basics/shell_task.py :caption: basics/shell_task.py :lines: 13-55 ``` @@ -40,21 +40,21 @@ Here's a breakdown of the parameters of the `ShellTask`: We define a task to instantiate `FlyteFile` and `FlyteDirectory`. 
A `.gitkeep` file is created in the FlyteDirectory as a placeholder to ensure the directory exists: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/shell_task.py +```{literalinclude} /examples/basics/basics/shell_task.py :caption: basics/shell_task.py :pyobject: create_entities ``` We create a workflow to define the dependencies between the tasks: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/shell_task.py +```{literalinclude} /examples/basics/basics/shell_task.py :caption: basics/shell_task.py :pyobject: shell_task_wf ``` You can run the workflow locally: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/shell_task.py +```{literalinclude} /examples/basics/basics/shell_task.py :caption: basics/shell_task.py :lines: 85-86 ``` diff --git a/docs/user_guide/basics/tasks.md b/docs/user_guide/basics/tasks.md index b76e61f5dc..22d078f9f1 100644 --- a/docs/user_guide/basics/tasks.md +++ b/docs/user_guide/basics/tasks.md @@ -34,7 +34,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import `task` from the `flytekit` library: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/basics/basics/task.py +```{literalinclude} /examples/basics/basics/task.py :caption: basics/task.py :lines: 1 ``` @@ -45,7 +45,7 @@ Learn more about the supported types in the {ref}`type-system section = 1.11.0. ::: +### Raise User Error + +Raw containers handle errors by checking for the presence of an `_ERROR` file in the +`output_data_dir` after the container's execution. If this file exists, Flyte treats it as +a user-defined error and retries the task if `retries` parameter is set in the task +metadata. + ## Scripts The contents of each script specified in the `ContainerTask` is as follows: diff --git a/docs/user_guide/data_types_and_io/accessing_attributes.md b/docs/user_guide/data_types_and_io/accessing_attributes.md index 8df9aaed4d..82b2345ad5 100644 --- a/docs/user_guide/data_types_and_io/accessing_attributes.md +++ b/docs/user_guide/data_types_and_io/accessing_attributes.md @@ -1,13 +1,19 @@ (attribute_access)= -# Accessing attributes +# Accessing attributes in workflows ```{eval-rst} .. tags:: Basic ``` -You can directly access attributes on output promises for lists, dicts, dataclasses and combinations of these types in Flyte. This functionality facilitates the direct passing of output attributes within workflows, -enhancing the convenience of working with complex data structures. +You can directly access attributes on output promises for lists, dictionaries, dataclasses, and combinations of these types in Flyte. +Note that while this functionality may appear to be the normal behavior of Python, code in `@workflow` functions is not actually Python, but rather a Python-like DSL that is compiled by Flyte. +Consequently, accessing attributes in this manner is, in fact, a specially implemented feature. +This functionality facilitates the direct passing of output attributes within workflows, enhancing the convenience of working with complex data structures. + +```{important} +Flytekit version >= v1.14.0 supports Pydantic BaseModel V2, you can do attribute access on Pydantic BaseModel V2 as well. 
+``` ```{note} To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. @@ -15,9 +21,9 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the required dependencies and define a common task for subsequent use: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/attribute_access.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/attribute_access.py :caption: data_types_and_io/attribute_access.py -:lines: 1-10 +:lines: 1-9 ``` ## List @@ -27,40 +33,40 @@ You can access an output list using index notation. Flyte currently does not support output promise access through list slicing. ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/attribute_access.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/attribute_access.py :caption: data_types_and_io/attribute_access.py -:lines: 14-23 +:lines: 13-22 ``` ## Dictionary Access the output dictionary by specifying the key. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/attribute_access.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/attribute_access.py :caption: data_types_and_io/attribute_access.py -:lines: 27-35 +:lines: 26-34 ``` ## Data class Directly access an attribute of a dataclass. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/attribute_access.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/attribute_access.py :caption: data_types_and_io/attribute_access.py -:lines: 39-53 +:lines: 38-51 ``` ## Complex type Combinations of list, dict and dataclass also work effectively. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/attribute_access.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/attribute_access.py :caption: data_types_and_io/attribute_access.py -:lines: 57-80 +:lines: 55-78 ``` You can run all the workflows locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/attribute_access.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/attribute_access.py :caption: data_types_and_io/attribute_access.py -:lines: 84-88 +:lines: 82-86 ``` ## Failure scenario diff --git a/docs/user_guide/data_types_and_io/dataclass.md b/docs/user_guide/data_types_and_io/dataclass.md index bc7ae9a26d..462ba7da3a 100644 --- a/docs/user_guide/data_types_and_io/dataclass.md +++ b/docs/user_guide/data_types_and_io/dataclass.md @@ -11,8 +11,24 @@ When you've multiple values that you want to send across Flyte entities, you can Flytekit uses the [Mashumaro library](https://github.com/Fatal1ty/mashumaro) to serialize and deserialize dataclasses. 
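Before the serialization details below, a minimal sketch of the basic pattern may help: a dataclass passed between two tasks. This is illustrative only (not the Flytesnacks example) and assumes flytekit >= v1.14, where a plain, fully annotated `@dataclass` needs no extra decorator or mixin.

```python
from dataclasses import dataclass

from flytekit import task, workflow


@dataclass
class Results:
    count: int
    label: str


@task
def make_results(n: int) -> Results:
    return Results(count=n, label=f"batch-{n}")


@task
def describe(r: Results) -> str:
    return f"{r.label} has {r.count} rows"


@workflow
def results_wf(n: int = 3) -> str:
    return describe(r=make_results(n=n))
```

The full Flytesnacks example follows below.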
+With the 1.14 release, `flytekit` adopted `MessagePack` as the +serialization format for dataclasses, overcoming a major limitation of previous versions, which serialized dataclasses into a JSON string within a Protobuf `struct` datatype: + +to store `int` values, Protobuf's `struct` converts them to `float`, forcing users to write boilerplate code to work around this issue. + +:::{important} +If you're using Flytekit version < v1.11.1, you will need to add `from dataclasses_json import dataclass_json` to your imports and decorate your dataclass with `@dataclass_json`. +::: + :::{important} -If you're using Flytekit version below v1.11.1, you will need to add `from dataclasses_json import dataclass_json` to your imports and decorate your dataclass with `@dataclass_json`. +Flytekit version < v1.14.0 will produce a Protobuf `struct` literal for dataclasses. + +Flytekit version >= v1.14.0 will produce a MessagePack bytes literal for dataclasses. + +If you're using Flytekit version >= v1.14.0 and you want to produce a Protobuf `struct` literal for dataclasses, you can +set the environment variable `FLYTE_USE_OLD_DC_FORMAT` to `true`. + +For more details, you can refer to the MessagePack IDL RFC: https://github.com/flyteorg/flyte/blob/master/rfc/system/5741-binary-idl-with-message-pack.md ::: ```{note} @@ -21,13 +37,13 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the necessary dependencies: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/cfb5ea3b0d0502ef7df1f2e14f4a0d9b78250b6a/examples/data_types_and_io/data_types_and_io/dataclass.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/dataclass.py :caption: data_types_and_io/dataclass.py :lines: 1-9 ``` Build your custom image with ImageSpec: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/cfb5ea3b0d0502ef7df1f2e14f4a0d9b78250b6a/examples/data_types_and_io/data_types_and_io/dataclass.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/dataclass.py :caption: data_types_and_io/dataclass.py :lines: 16-19 ``` @@ -35,7 +51,7 @@ Build your custom image with ImageSpec: ## Python types We define a `dataclass` with `int`, `str` and `dict` as the data types. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/cfb5ea3b0d0502ef7df1f2e14f4a0d9b78250b6a/examples/data_types_and_io/data_types_and_io/dataclass.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/dataclass.py :caption: data_types_and_io/dataclass.py :pyobject: Datum ``` @@ -48,7 +64,7 @@ All variables in a data class should be **annotated with their type**. Failure t Once declared, a dataclass can be returned as an output or accepted as an input. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/cfb5ea3b0d0502ef7df1f2e14f4a0d9b78250b6a/examples/data_types_and_io/data_types_and_io/dataclass.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/dataclass.py :caption: data_types_and_io/dataclass.py :lines: 32-47 ``` @@ -57,7 +73,7 @@ Once declared, a dataclass can be returned as an output or accepted as an input. We also define a data class that accepts {std:ref}`StructuredDataset `, {std:ref}`FlyteFile ` and {std:ref}`FlyteDirectory `.
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/cfb5ea3b0d0502ef7df1f2e14f4a0d9b78250b6a/examples/data_types_and_io/data_types_and_io/dataclass.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/dataclass.py :caption: data_types_and_io/dataclass.py :lines: 51-88 ``` @@ -67,14 +83,14 @@ flyte file, flyte directory and structured dataset. We define a workflow that calls the tasks created above. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/cfb5ea3b0d0502ef7df1f2e14f4a0d9b78250b6a/examples/data_types_and_io/data_types_and_io/dataclass.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/dataclass.py :caption: data_types_and_io/dataclass.py :pyobject: dataclass_wf ``` You can run the workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/cfb5ea3b0d0502ef7df1f2e14f4a0d9b78250b6a/examples/data_types_and_io/data_types_and_io/dataclass.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/dataclass.py :caption: data_types_and_io/dataclass.py :lines: 101-102 ``` diff --git a/docs/user_guide/data_types_and_io/enum_type.md b/docs/user_guide/data_types_and_io/enum_type.md index b8e9011921..f5b1873d98 100644 --- a/docs/user_guide/data_types_and_io/enum_type.md +++ b/docs/user_guide/data_types_and_io/enum_type.md @@ -22,7 +22,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the dependencies: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/enum_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/enum_type.py :caption: data_types_and_io/enum_type.py :lines: 1-3 ``` @@ -30,14 +30,14 @@ To begin, import the dependencies: We define an enum and a simple coffee maker workflow that accepts an order and brews coffee ☕️ accordingly. 
The assumption is that the coffee maker only understands enum inputs: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/enum_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/enum_type.py :caption: data_types_and_io/enum_type.py :lines: 9-35 ``` The workflow can also accept an enum value: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/enum_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/enum_type.py :caption: data_types_and_io/enum_type.py :pyobject: coffee_maker_enum ``` @@ -51,7 +51,7 @@ pyflyte run \ You can run the workflows locally: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/enum_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/enum_type.py :caption: data_types_and_io/enum_type.py :lines: 44-46 ``` diff --git a/docs/user_guide/data_types_and_io/flytedirectory.md b/docs/user_guide/data_types_and_io/flytedirectory.md index 4ad2316ded..82cc5ab2a0 100644 --- a/docs/user_guide/data_types_and_io/flytedirectory.md +++ b/docs/user_guide/data_types_and_io/flytedirectory.md @@ -16,7 +16,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the libraries: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/folder.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/folder.py :caption: data_types_and_io/folder.py :lines: 1-10 ``` @@ -27,7 +27,7 @@ let's continue by considering the normalization of columns in a CSV file. The following task downloads a list of URLs pointing to CSV files and returns the folder path in a `FlyteDirectory` object. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/folder.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/folder.py :caption: data_types_and_io/folder.py :pyobject: download_files ``` @@ -57,7 +57,7 @@ demonstrates how Flyte tasks are simply entrypoints of execution, which can them other functions and routines that are written in pure Python. ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/folder.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/folder.py :caption: data_types_and_io/folder.py :pyobject: normalize_columns ``` @@ -65,7 +65,7 @@ other functions and routines that are written in pure Python. We then define a task that accepts the previously downloaded folder, along with some metadata about the column names of each file in the directory and the column names that we want to normalize. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/folder.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/folder.py :caption: data_types_and_io/folder.py :pyobject: normalize_all_files ``` @@ -74,14 +74,14 @@ Compose all of the above tasks into a workflow. 
This workflow accepts a list of URL strings pointing to a remote location containing a CSV file, a list of column names associated with each CSV file, and a list of columns that we want to normalize. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/folder.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/folder.py :caption: data_types_and_io/folder.py :pyobject: download_and_normalize_csv_files ``` You can run the workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/folder.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/folder.py :caption: data_types_and_io/folder.py :lines: 94-114 ``` @@ -98,7 +98,7 @@ This feature is marked as experimental. We'd love feedback on the API! Here is a simple example, you can accept a `FlyteDirectory` as an input, walk through it and copy the files to another `FlyteDirectory` one by one. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/ddce0448141ea6d2cb148df52bf408874adb15ad/examples/data_types_and_io/data_types_and_io/file_streaming.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/file_streaming.py :caption: data_types_and_io/file_streaming.py :lines: 23-33 ``` diff --git a/docs/user_guide/data_types_and_io/flytefile.md b/docs/user_guide/data_types_and_io/flytefile.md index 76dc0f6be8..997dbe031a 100644 --- a/docs/user_guide/data_types_and_io/flytefile.md +++ b/docs/user_guide/data_types_and_io/flytefile.md @@ -23,7 +23,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte First, import the libraries: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/file.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/file.py :caption: data_types_and_io/file.py :lines: 1-8 ``` @@ -43,7 +43,7 @@ Predefined aliases for commonly used flyte file formats are also available. You can find them [here](https://github.com/flyteorg/flytekit/blob/master/flytekit/types/file/__init__.py). ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/file.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/file.py :caption: data_types_and_io/file.py :pyobject: normalize_columns ``` @@ -56,16 +56,16 @@ When this task finishes, the Flytekit engine returns the `FlyteFile` instance, u Lastly, define a workflow. The `normalize_csv_files` workflow has an `output_location` argument which is passed to the `location` input of the task. If it's not an empty string, the task attempts to upload its file to that location. 
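The full workflow is included below. As a rough, hypothetical sketch of the same idea (illustrative names, not the Flytesnacks code), a task can decide at runtime whether to hand Flyte an explicit `remote_path`:

```python
import os
import tempfile

from flytekit import task
from flytekit.types.file import FlyteFile


@task
def write_report(text: str, location: str = "") -> FlyteFile:
    out_path = os.path.join(tempfile.mkdtemp(), "report.txt")
    with open(out_path, "w") as f:
        f.write(text)
    if location:
        # Upload to the caller-provided location instead of the default raw output prefix
        return FlyteFile(path=out_path, remote_path=location)
    return FlyteFile(path=out_path)
```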
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/data_types_and_io/data_types_and_io/file.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/file.py :caption: data_types_and_io/file.py :pyobject: normalize_csv_file ``` You can run the workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/data_types_and_io/data_types_and_io/file.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/file.py :caption: data_types_and_io/file.py -:lines: 75-95 +:lines: 72-92 ``` You can enable type validation if you have the [python-magic](https://pypi.org/project/python-magic/) package installed. @@ -101,7 +101,7 @@ This feature is marked as experimental. We'd love feedback on the API! Here is a simple example of removing some columns from a CSV file and writing the result to a new file: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/ddce0448141ea6d2cb148df52bf408874adb15ad/examples/data_types_and_io/data_types_and_io/file_streaming.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/file_streaming.py :caption: data_types_and_io/file_streaming.py :lines: 8-20 ``` diff --git a/docs/user_guide/data_types_and_io/index.md b/docs/user_guide/data_types_and_io/index.md index 3280054696..c554b08acd 100644 --- a/docs/user_guide/data_types_and_io/index.md +++ b/docs/user_guide/data_types_and_io/index.md @@ -114,7 +114,7 @@ Here's a breakdown of these mappings: - Use ``pyspark.DataFrame`` as a type hint. * - ``pydantic.BaseModel`` - ``Map`` - - To utilize the type, install the ``flytekitplugins-pydantic`` plugin. + - To utilize the type, install the ``pydantic>2`` module. - Use ``pydantic.BaseModel`` as a type hint. * - ``torch.Tensor`` / ``torch.nn.Module`` - File @@ -144,6 +144,7 @@ flytefile flytedirectory structureddataset dataclass +pydantic_basemodel accessing_attributes pytorch_type enum_type diff --git a/docs/user_guide/data_types_and_io/pickle_type.md b/docs/user_guide/data_types_and_io/pickle_type.md index 301ff95f9f..6a1d84bd37 100644 --- a/docs/user_guide/data_types_and_io/pickle_type.md +++ b/docs/user_guide/data_types_and_io/pickle_type.md @@ -27,7 +27,7 @@ This example demonstrates how you can utilize custom objects without registering To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/pickle_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pickle_type.py :caption: data_types_and_io/pickle_type.py :lines: 1 ``` @@ -40,7 +40,7 @@ Alternatively, you can {ref}`turn this object into a dataclass ` for We have used a simple object here for demonstration purposes. ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/pickle_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pickle_type.py :caption: data_types_and_io/pickle_type.py :lines: 7-26 ``` @@ -53,7 +53,7 @@ or significant list elements, you can specify a batch size. This feature allows for the processing of each batch as a separate pickle file. The following example demonstrates how to set the batch size. 
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/pickle_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pickle_type.py :caption: data_types_and_io/pickle_type.py :lines: 35-58 ``` @@ -64,7 +64,7 @@ The `welcome_superheroes` task will generate two pickle files: one containing tw You can run the workflows locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/pickle_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pickle_type.py :caption: data_types_and_io/pickle_type.py :lines: 62-64 ``` diff --git a/docs/user_guide/data_types_and_io/pydantic_basemodel.md b/docs/user_guide/data_types_and_io/pydantic_basemodel.md new file mode 100644 index 0000000000..be40672534 --- /dev/null +++ b/docs/user_guide/data_types_and_io/pydantic_basemodel.md @@ -0,0 +1,103 @@ +(pydantic_basemodel)= + +# Pydantic BaseModel + +```{eval-rst} +.. tags:: Basic +``` + +`flytekit` version >=1.14 natively supports the `JSON` format that Pydantic `BaseModel` produces, enhancing the +interoperability of Pydantic BaseModels with the Flyte type system. + +:::{important} +Pydantic BaseModel V2 only works when you are using flytekit version >= v1.14.0. +::: + +With the 1.14 release, `flytekit` adopted `MessagePack` as the serialization format for Pydantic `BaseModel`, +overcoming a major limitation of previous versions, which serialized into a JSON string within a Protobuf `struct` datatype: + +to store `int` values, Protobuf's `struct` converts them to `float`, forcing users to write boilerplate code to work around this issue. + +:::{important} +By default, `flytekit >= 1.14` will produce `msgpack` bytes literals when serializing, preserving the types defined in your `BaseModel` class. +If you're serializing `BaseModel` using `flytekit` version >= v1.14.0 and you want to produce a Protobuf `struct` literal instead, you can set the environment variable `FLYTE_USE_OLD_DC_FORMAT` to `true`. + +For more details, you can refer to the MessagePack IDL RFC: https://github.com/flyteorg/flyte/blob/master/rfc/system/5741-binary-idl-with-message-pack.md +::: + +```{note} +You can put dataclasses and Flyte types (FlyteFile, FlyteDirectory, FlyteSchema, and StructuredDataset) in a Pydantic BaseModel. +``` + +```{note} +To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. +``` + +To begin, import the necessary dependencies: + +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pydantic_basemodel.py +:caption: data_types_and_io/pydantic_basemodel.py +:lines: 1-9 +``` + +Build your custom image with ImageSpec: +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pydantic_basemodel.py +:caption: data_types_and_io/pydantic_basemodel.py +:lines: 11-14 +``` + +## Python types +We define a Pydantic `BaseModel` with `int`, `str` and `dict` as the data types. + +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pydantic_basemodel.py +:caption: data_types_and_io/pydantic_basemodel.py +:pyobject: Datum +``` + +You can send a Pydantic `BaseModel` between different tasks written in various languages, and input it through the Flyte console as raw JSON. + +:::{note} +All variables in a `BaseModel` should be **annotated with their type**. Failure to do so will result in an error.
+::: + +Once declared, a dataclass can be returned as an output or accepted as an input. + +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pydantic_basemodel.py +:caption: data_types_and_io/pydantic_basemodel.py +:lines: 26-41 +``` + +## Flyte types +We also define a data class that accepts {std:ref}`StructuredDataset `, +{std:ref}`FlyteFile ` and {std:ref}`FlyteDirectory `. + +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pydantic_basemodel.py +:caption: data_types_and_io/pydantic_basemodel.py +:lines: 45-86 +``` + +A data class supports the usage of data associated with Python types, data classes, +flyte file, flyte directory and structured dataset. + +We define a workflow that calls the tasks created above. + +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pydantic_basemodel.py +:caption: data_types_and_io/pydantic_basemodel.py +:pyobject: basemodel_wf +``` + +You can run the workflow locally as follows: + +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pydantic_basemodel.py +:caption: data_types_and_io/pydantic_basemodel.py +:lines: 99-100 +``` + +To trigger a task that accepts a dataclass as an input with `pyflyte run`, you can provide a JSON file as an input: +``` +pyflyte run \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/b71e01d45037cea883883f33d8d93f258b9a5023/examples/data_types_and_io/data_types_and_io/pydantic_basemodel.py \ + basemodel_wf --x 1 --y 2 +``` + +[flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/master/examples/data_types_and_io/ diff --git a/docs/user_guide/data_types_and_io/pytorch_type.md b/docs/user_guide/data_types_and_io/pytorch_type.md index 24696f6a75..685c7a38b2 100644 --- a/docs/user_guide/data_types_and_io/pytorch_type.md +++ b/docs/user_guide/data_types_and_io/pytorch_type.md @@ -18,7 +18,7 @@ At times, you may find the need to pass tensors and modules (models) within your To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/pytorch_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pytorch_type.py :caption: data_types_and_io/pytorch_type.py :lines: 5-50 ``` @@ -36,7 +36,7 @@ According to the PyTorch [docs](https://pytorch.org/tutorials/beginner/saving_lo it's recommended to store the module's `state_dict` rather than the module itself, although the serialization should work in either case. 
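As a quick orientation before the full example included below, the sketch shows the native type support in its simplest form: `torch.Tensor` and `torch.nn.Module` used directly as task inputs and outputs. It is a hypothetical, minimal example and assumes `torch` is installed alongside flytekit.

```python
import torch
import torch.nn as nn

from flytekit import task, workflow


@task
def make_tensor() -> torch.Tensor:
    return torch.zeros(1, 2)


@task
def make_model() -> nn.Module:
    return nn.Linear(4, 2)


@task
def apply(model: nn.Module, x: torch.Tensor) -> torch.Tensor:
    # Run the model on a dummy input and shift it by the received tensor
    return model(torch.ones(1, 4)) + x


@workflow
def torch_wf() -> torch.Tensor:
    return apply(model=make_model(), x=make_tensor())
```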
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/pytorch_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/pytorch_type.py :caption: data_types_and_io/pytorch_type.py :lines: 63-117 ``` diff --git a/docs/user_guide/data_types_and_io/structureddataset.md b/docs/user_guide/data_types_and_io/structureddataset.md index 9a82610590..caacb15b89 100644 --- a/docs/user_guide/data_types_and_io/structureddataset.md +++ b/docs/user_guide/data_types_and_io/structureddataset.md @@ -37,14 +37,14 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the dependencies for the example: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py :lines: 1-19 ``` Define a task that returns a Pandas DataFrame. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py :pyobject: generate_pandas_df ``` @@ -66,9 +66,9 @@ you can just specify the column names and their types in the structured dataset First, initialize column types you want to extract from the `StructuredDataset`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 31-32 +:lines: 36-37 ``` Define a task that opens a structured dataset by calling `all()`. @@ -76,9 +76,9 @@ When you invoke `all()` with ``pandas.DataFrame``, the Flyte engine downloads th Keep in mind that you can invoke ``open()`` with any dataframe type that's supported or added to structured dataset. For instance, you can use ``pa.Table`` to convert the Pandas DataFrame to a PyArrow table. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 42-52 +:lines: 47-57 ``` The code may result in runtime failures if the columns do not match. @@ -89,9 +89,9 @@ You can use a custom serialization format to serialize your dataframes. Here's how you can register the Pandas to CSV handler, which is already available, and enable the CSV serialization by annotating the structured dataset with the CSV format: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 58-72 +:lines: 63-77 ``` ## Storage driver and location @@ -198,7 +198,7 @@ enabling the use of a 2D NumPy array as a valid type within structured datasets. 
Extend `StructuredDatasetEncoder` and implement the `encode` function. The `encode` function converts NumPy array to an intermediate format (parquet file format in this case). -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py :pyobject: NumpyEncodingHandler ``` @@ -208,7 +208,7 @@ The `encode` function converts NumPy array to an intermediate format (parquet fi Extend {py:class}`StructuredDatasetDecoder` and implement the {py:meth}`~StructuredDatasetDecoder.decode` function. The {py:meth}`~StructuredDatasetDecoder.decode` function converts the parquet file to a `numpy.ndarray`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py :pyobject: NumpyDecodingHandler ``` @@ -218,7 +218,7 @@ The {py:meth}`~StructuredDatasetDecoder.decode` function converts the parquet fi Create a default renderer for numpy array, then Flytekit will use this renderer to display schema of NumPy array on the Flyte deck. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py :pyobject: NumpyRenderer ``` @@ -228,16 +228,16 @@ Specify the Python type you want to register this encoder with (`np.ndarray`), the storage engine to register this against (if not specified, it is assumed to work for all the storage backends), and the byte format, which in this case is `PARQUET`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 128-130 +:lines: 133-135 ``` You can now use `numpy.ndarray` to deserialize the parquet file to NumPy and serialize a task's output (NumPy array) to a parquet file. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 135-148 +:lines: 140-153 ``` :::{note} @@ -246,9 +246,9 @@ You can now use `numpy.ndarray` to deserialize the parquet file to NumPy and ser You can run the code locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 152-156 +:lines: 157-161 ``` ### The nested typed columns @@ -259,9 +259,9 @@ Like most storage formats (e.g. 
Avro, Parquet, and BigQuery), StructuredDataset Nested field StructuredDataset should be run when flytekit version > 1.11.0. ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 158-285 +:lines: 163-290 ``` [flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/master/examples/data_types_and_io/ diff --git a/docs/user_guide/data_types_and_io/tensorflow_type.md b/docs/user_guide/data_types_and_io/tensorflow_type.md index a68ce5ecaf..9035c71db5 100644 --- a/docs/user_guide/data_types_and_io/tensorflow_type.md +++ b/docs/user_guide/data_types_and_io/tensorflow_type.md @@ -9,9 +9,9 @@ This document outlines the TensorFlow types available in Flyte, which facilitate the integration of TensorFlow models and datasets in Flyte workflows. ### Import necessary libraries and modules -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/refs/heads/master/examples/data_types_and_io/data_types_and_io/tensorflow_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/tensorflow_type.py :caption: data_types_and_io/tensorflow_type.py -:lines: 2-14 +:lines: 3-12 ``` ## Tensorflow model @@ -30,9 +30,9 @@ The `TensorFlowModelTransformer` allows you to save a TensorFlow model to a remo ```{note} To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/refs/heads/master/examples/data_types_and_io/data_types_and_io/tensorflow_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/tensorflow_type.py :caption: data_types_and_io/tensorflow_type.py -:lines: 16-33 +:lines: 16-34 ``` ## TFRecord files @@ -47,9 +47,9 @@ Flyte supports TFRecord files through the TFRecordFile type, which can handle se ### Usage The `TensorFlowRecordFileTransformer` enables you to work with single TFRecord files, making it easy to read and write data in TensorFlow's TFRecord format. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/refs/heads/master/examples/data_types_and_io/data_types_and_io/tensorflow_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/tensorflow_type.py :caption: data_types_and_io/tensorflow_type.py -:lines: 35-45 +:lines: 38-48 ``` ## TFRecord directories @@ -66,9 +66,9 @@ Flyte supports directories containing multiple TFRecord files through the `TFRec The `TensorFlowRecordsDirTransformer` allows you to work with directories of TFRecord files, which is useful for handling large datasets that are split across multiple files. 
#### Example -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/refs/heads/master/examples/data_types_and_io/data_types_and_io/tensorflow_type.py +```{literalinclude} /examples/data_types_and_io/data_types_and_io/tensorflow_type.py :caption: data_types_and_io/tensorflow_type.py -:lines: 47-56 +:lines: 52-62 ``` ## Configuration class: `TFRecordDatasetConfig` diff --git a/docs/user_guide/development_lifecycle/cache_serializing.md b/docs/user_guide/development_lifecycle/cache_serializing.md index 1445de13cf..4ea14fb89e 100644 --- a/docs/user_guide/development_lifecycle/cache_serializing.md +++ b/docs/user_guide/development_lifecycle/cache_serializing.md @@ -17,7 +17,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte For any {py:func}`flytekit.task` in Flyte, there is always one required import, which is: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/task_cache_serialize.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/task_cache_serialize.py :caption: development_lifecycle/task_cache_serialize.py :lines: 1 ``` @@ -27,7 +27,7 @@ Task cache serializing is disabled by default to avoid unexpected behavior for t This operation is only useful for cacheable tasks, where one may reuse output from a previous execution. Flyte requires implicitly enabling the `cache` parameter on all cache serializable tasks. Cache key definitions follow the same rules as non-serialized cache tasks. It is important to understand the implications of the task signature and `cache_version` parameter in defining cached results. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/task_cache_serialize.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/task_cache_serialize.py :caption: development_lifecycle/task_cache_serialize.py :pyobject: square ``` diff --git a/docs/user_guide/development_lifecycle/caching.md b/docs/user_guide/development_lifecycle/caching.md index ea6a5af574..2711aee68a 100644 --- a/docs/user_guide/development_lifecycle/caching.md +++ b/docs/user_guide/development_lifecycle/caching.md @@ -77,19 +77,19 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte Import the necessary libraries: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/task_cache.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/task_cache.py :caption: development_lifecycle/task_cache.py :lines: 1-3 ``` For any {py:func}`flytekit.task` in Flyte, there is always one required import, which is: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/task_cache.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/task_cache.py :caption: development_lifecycle/task_cache.py :lines: 8-10 ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/task_cache.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/task_cache.py :caption: development_lifecycle/task_cache.py :pyobject: square ``` @@ -150,18 +150,18 @@ 
The format used by the store is opaque and not meant to be inspectable. The default behavior displayed by Flyte's memoization feature might not match the user intuition. For example, this code makes use of pandas dataframes: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/task_cache.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/task_cache.py :caption: development_lifecycle/task_cache.py -:lines: 39-54 +:lines: 44-59 ``` If run twice with the same inputs, one would expect that `bar` would trigger a cache hit, but it turns out that's not the case because of how dataframes are represented in Flyte. However, with release 1.2.0, Flyte provides a new way to control memoization behavior of literals. This is done via a `typing.Annotated` call on the task signature. For example, in order to cache the result of calls to `bar`, you can rewrite the code above like this: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/task_cache.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/task_cache.py :caption: development_lifecycle/task_cache.py -:lines: 64-85 +:lines: 69-91 ``` Note how the output of task `foo` is annotated with an object of type `HashMethod`. Essentially, it represents a function that produces a hash that is used as part of the cache key calculation in calling the task `bar`. @@ -175,9 +175,9 @@ This feature also works in local execution. Here's a complete example of the feature: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/task_cache.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/task_cache.py :caption: development_lifecycle/task_cache.py -:lines: 97-134 +:lines: 103-140 ``` [flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/master/examples/development_lifecycle/ diff --git a/docs/user_guide/development_lifecycle/decks.md b/docs/user_guide/development_lifecycle/decks.md index 366302d49e..68887615b3 100644 --- a/docs/user_guide/development_lifecycle/decks.md +++ b/docs/user_guide/development_lifecycle/decks.md @@ -28,7 +28,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the dependencies: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :lines: 1-4 ``` @@ -39,7 +39,7 @@ We create a new deck named `pca` and render Markdown content along with a You can begin by initializing an {ref}`ImageSpec ` object to encompass all the necessary dependencies. This approach automatically triggers a Docker build, alleviating the need for you to manually create a Docker image. 
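The example's actual `ImageSpec` is included below; as a hedged sketch of the general shape (the package list and registry here are placeholders, not the example's values):

```python
from flytekit import ImageSpec, task

custom_image = ImageSpec(
    packages=["flytekitplugins-deck-standard", "plotly", "scikit-learn"],
    registry="localhost:30000",  # e.g. the local registry of the demo cluster
)


@task(enable_deck=True, container_image=custom_image)
def render_deck() -> None:
    # Deck-producing code goes here; see the included example for the real task
    ...
```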
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/b431ae399def3a749833fe81c2c291b016cf3213/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :lines: 15-27 ``` @@ -51,7 +51,7 @@ To upload the image to the local registry in the demo cluster, indicate the regi Note the usage of `append` to append the Plotly deck to the Markdown deck. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :pyobject: pca_plot ``` @@ -96,7 +96,7 @@ When the task connected with a deck object is executed, these objects employ ren Creates a profile report from a Pandas DataFrame. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/b431ae399def3a749833fe81c2c291b016cf3213/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :lines: 56-63 ``` @@ -113,7 +113,7 @@ Creates a profile report from a Pandas DataFrame. Renders DataFrame as an HTML table. This renderer doesn't necessitate plugin installation since it's accessible within the flytekit library. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/b431ae399def3a749833fe81c2c291b016cf3213/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :lines: 69-76 ``` @@ -127,7 +127,7 @@ This renderer doesn't necessitate plugin installation since it's accessible with Converts a Markdown string into HTML, producing HTML as a Unicode string. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :pyobject: markdown_renderer ``` @@ -147,7 +147,7 @@ The median (Q2) is indicated by a line within the box. Typically, the whiskers extend to the edges of the box, plus or minus 1.5 times the interquartile range (IQR: Q3-Q1). -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/b431ae399def3a749833fe81c2c291b016cf3213/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :lines: 97-103 ``` @@ -162,7 +162,7 @@ plus or minus 1.5 times the interquartile range (IQR: Q3-Q1). Converts a {ref}`FlyteFile ` or `PIL.Image.Image` object into an HTML string, where the image data is encoded as a base64 string. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/b431ae399def3a749833fe81c2c291b016cf3213/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :lines: 109-123 ``` @@ -176,7 +176,7 @@ where the image data is encoded as a base64 string. Converts a Pandas dataframe into an HTML table. 
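For reference, a minimal hedged sketch of attaching a dataframe deck from inside a task, using flytekit's built-in `TopFrameRenderer` (names and data are illustrative):

```python
import pandas as pd

import flytekit
from flytekit import task
from flytekit.deck.renderer import TopFrameRenderer


@task(enable_deck=True)
def table_deck() -> None:
    df = pd.DataFrame({"species": ["setosa", "virginica"], "count": [50, 50]})
    # Render the dataframe as an HTML table and attach it to a named deck
    flytekit.Deck("table", TopFrameRenderer().to_html(df))
```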
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/b431ae399def3a749833fe81c2c291b016cf3213/examples/development_lifecycle/development_lifecycle/decks.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/decks.py :caption: development_lifecycle/decks.py :lines: 127-135 ``` diff --git a/docs/user_guide/development_lifecycle/failure_node.md b/docs/user_guide/development_lifecycle/failure_node.md index 9bf7e2dd52..3e3cab7149 100644 --- a/docs/user_guide/development_lifecycle/failure_node.md +++ b/docs/user_guide/development_lifecycle/failure_node.md @@ -20,21 +20,21 @@ To address this issue, you can add a failure node into your workflow. This ensur To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/failure_node.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/failure_node.py :caption: development_lifecycle/failure_node.py :lines: 1-6 ``` Create a task that will fail during execution: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/failure_node.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/failure_node.py :caption: development_lifecycle/failure_node.py :lines: 10-18 ``` Create a task that will be executed if any of the tasks in the workflow fail: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/failure_node.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/failure_node.py :caption: development_lifecycle/failure_node.py :pyobject: clean_up ``` @@ -45,21 +45,21 @@ Specify the `on_failure` to a cleanup task. This task will be executed if any of The input of `clean_up` should be the exact same as the input of the workflow. ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/failure_node.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/failure_node.py :caption: development_lifecycle/failure_node.py :pyobject: subwf ``` By setting the failure policy to `FAIL_AFTER_EXECUTABLE_NODES_COMPLETE` to ensure that the `wf1` is executed even if the subworkflow fails. In this case, both parent and child workflows will fail, resulting in the `clean_up` task being executed twice: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/failure_node.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/failure_node.py :caption: development_lifecycle/failure_node.py :lines: 42-53 ``` You can also set the `on_failure` to a workflow. 
This workflow will be executed if any of the tasks in the workflow fail: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/development_lifecycle/development_lifecycle/failure_node.py +```{literalinclude} /examples/development_lifecycle/development_lifecycle/failure_node.py :caption: development_lifecycle/failure_node.py :pyobject: wf2 ``` diff --git a/docs/user_guide/extending/custom_types.md b/docs/user_guide/extending/custom_types.md index a9670e9e8f..92a2ab5a19 100644 --- a/docs/user_guide/extending/custom_types.md +++ b/docs/user_guide/extending/custom_types.md @@ -27,7 +27,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte First, we import the dependencies: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/extending/extending/custom_types.py +```{literalinclude} /examples/extending/extending/custom_types.py :caption: extending/custom_types.py :lines: 1-7 ``` @@ -38,7 +38,7 @@ First, we import the dependencies: Defined type here represents a list of files on the disk. We will refer to it as `MyDataset`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/extending/extending/custom_types.py +```{literalinclude} /examples/extending/extending/custom_types.py :caption: extending/custom_types.py :pyobject: MyDataset ``` @@ -53,7 +53,7 @@ The `TypeTransformer` is a Generic abstract base class. The `Generic` type argum that we want to work with. In this case, it is the `MyDataset` object. ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/extending/extending/custom_types.py +```{literalinclude} /examples/extending/extending/custom_types.py :caption: extending/custom_types.py :pyobject: MyDatasetTransformer ``` @@ -61,23 +61,23 @@ that we want to work with. In this case, it is the `MyDataset` object. Before we can use MyDataset in our tasks, we need to let Flytekit know that `MyDataset` should be considered as a valid type. This is done using {py:class}`~flytekit:flytekit.extend.TypeEngine`'s `register` method. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/extending/extending/custom_types.py +```{literalinclude} /examples/extending/extending/custom_types.py :caption: extending/custom_types.py -:lines: 87 +:lines: 86 ``` The new type should be ready to use! Let us write an example generator and consumer for this new datatype. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/extending/extending/custom_types.py +```{literalinclude} /examples/extending/extending/custom_types.py :caption: extending/custom_types.py -:lines: 91-114 +:lines: 90-114 ``` This workflow can be executed and tested locally. Flytekit will exercise the entire path even if you run it locally. 
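For reference, a condensed, hypothetical transformer for a simpler string-backed wrapper type shows the same `TypeTransformer` surface that `MyDatasetTransformer` implements (this is a sketch, not the example's code); the included snippet below then runs the example workflow locally.

```python
from typing import Type

from flytekit import FlyteContext
from flytekit.extend import TypeEngine, TypeTransformer
from flytekit.models.literals import Literal, Primitive, Scalar
from flytekit.models.types import LiteralType, SimpleType


class MyToken:
    """Hypothetical wrapper type carrying a single string value."""

    def __init__(self, value: str):
        self.value = value


class MyTokenTransformer(TypeTransformer[MyToken]):
    def __init__(self):
        super().__init__(name="mytoken-transform", t=MyToken)

    def get_literal_type(self, t: Type[MyToken]) -> LiteralType:
        # Represent MyToken as a plain string on the wire
        return LiteralType(simple=SimpleType.STRING)

    def to_literal(self, ctx: FlyteContext, python_val: MyToken, python_type, expected) -> Literal:
        return Literal(scalar=Scalar(primitive=Primitive(string_value=python_val.value)))

    def to_python_value(self, ctx: FlyteContext, lv: Literal, expected_python_type) -> MyToken:
        return MyToken(lv.scalar.primitive.string_value)


TypeEngine.register(MyTokenTransformer())
```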
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/extending/extending/custom_types.py +```{literalinclude} /examples/extending/extending/custom_types.py :caption: extending/custom_types.py -:lines: 119-120 +:lines: 118-119 ``` [flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/0ec8388759d34566a0ffc0c3c2d7443fd4a3a46f/examples/extending/ diff --git a/docs/user_guide/extending/user_container_task_plugins.md b/docs/user_guide/extending/user_container_task_plugins.md index 99a3adf155..444ad9d646 100644 --- a/docs/user_guide/extending/user_container_task_plugins.md +++ b/docs/user_guide/extending/user_container_task_plugins.md @@ -32,7 +32,7 @@ def wait_and_run(path: str) -> int: return do_next(path=path) ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/extending/extending/user_container.py +```{literalinclude} /examples/extending/extending/user_container.py :caption: extending/user_container.py :lines: 1-6 ``` @@ -42,7 +42,7 @@ def wait_and_run(path: str) -> int: As illustrated above, to achieve this structure we need to create a class named `WaitForObjectStoreFile`, which derives from {py:class}`flytekit.PythonFunctionTask` as follows. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/extending/extending/user_container.py +```{literalinclude} /examples/extending/extending/user_container.py :caption: extending/user_container.py :pyobject: WaitForObjectStoreFile ``` @@ -68,14 +68,14 @@ Refer to the [spark plugin](https://github.com/flyteorg/flytekit/tree/master/plu ### Actual usage -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/extending/extending/user_container.py +```{literalinclude} /examples/extending/extending/user_container.py :caption: extending/user_container.py :lines: 54-69 ``` And of course, you can run the workflow locally using your own new shiny plugin! -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/extending/extending/user_container.py +```{literalinclude} /examples/extending/extending/user_container.py :caption: extending/user_container.py :lines: 73-78 ``` diff --git a/docs/user_guide/flyte_agents/developing_agents.md b/docs/user_guide/flyte_agents/developing_agents.md index f76c662fa0..a8b2050b01 100644 --- a/docs/user_guide/flyte_agents/developing_agents.md +++ b/docs/user_guide/flyte_agents/developing_agents.md @@ -7,6 +7,7 @@ jupytext: --- (developing_agents)= + # Developing agents The Flyte agent framework enables rapid agent development, since agents are decoupled from the core FlytePropeller engine. Rather than building a complete gRPC service from scratch, you can implement an agent as a Python class, easing development. Agents can be tested independently and deployed privately, making maintenance easier and giving you more flexibility and control over development. @@ -20,8 +21,9 @@ We strongly encourage you to contribute your agent to the Flyte community. To do ``` There are two types of agents: **async** and **sync**. -* **Async agents** enable long-running jobs that execute on an external platform over time. They communicate with external services that have asynchronous APIs that support `create`, `get`, and `delete` operations. The vast majority of agents are async agents. 
-* **Sync agents** enable request/response services that return immediate outputs (e.g. calling an internal API to fetch data or communicating with the OpenAI API). + +- **Async agents** enable long-running jobs that execute on an external platform over time. They communicate with external services that have asynchronous APIs that support `create`, `get`, and `delete` operations. The vast majority of agents are async agents. +- **Sync agents** enable request/response services that return immediate outputs (e.g. calling an internal API to fetch data or communicating with the OpenAI API). ```{note} @@ -41,6 +43,17 @@ To create a new async agent, extend the [`AsyncAgentBase`](https://github.com/fl - `get`: This method retrieves the job resource (jobID or output literal) associated with the task, such as a BigQuery job ID or Databricks task ID. - `delete`: Invoking this method will send a request to delete the corresponding job. +```{note} + +When users use the `create` method to create a new job, with its job ID, they can use the `get` method with job ID to +check the execution state is succeeded or not. + +Exceptional `delete` case: +If users interrupt a task while it is running, FlytePropeller will invoke the `delete` method to the corresponding +job. + +``` + ```python from typing import Optional from dataclasses import dataclass @@ -113,6 +126,7 @@ AgentRegistry.register(OpenAIAgent()) ``` #### Sensor interface specification + With the agent framework, you can easily build a custom sensor in Flyte to watch certain events or monitor the bucket in your workflow. To create a new sensor, extend the `[BaseSensor](https://github.com/flyteorg/flytekit/blob/master/flytekit/sensor/base_sensor.py#L43)` class and implement the `poke` method, which checks whether a specific condition is met. @@ -130,7 +144,6 @@ class FileSensor(BaseSensor): return fs.exists(path) ``` - ### 2. Test the agent You can test your agent in a {ref}`local Python environment ` or in a {ref}`local development cluster `. @@ -181,29 +194,29 @@ By default, all agent requests will be sent to the default agent service. Howeve you can route particular task requests to designated agent services by adjusting the FlytePropeller configuration. ```yaml - plugins: - agent-service: - # By default, all requests will be sent to the default agent. - defaultAgent: - endpoint: "dns:///flyteagent.flyte.svc.cluster.local:8000" - insecure: true - timeouts: - # CreateTask, GetTask and DeleteTask are for async agents. - # ExecuteTaskSync is for sync agents. - CreateTask: 5s - GetTask: 5s - DeleteTask: 5s - ExecuteTaskSync: 10s - defaultTimeout: 10s - agents: - custom_agent: - endpoint: "dns:///custom-flyteagent.flyte.svc.cluster.local:8000" - insecure: false - defaultServiceConfig: '{"loadBalancingConfig": [{"round_robin":{}}]}' - timeouts: - GetTask: 5s - defaultTimeout: 10s - agentForTaskTypes: - # It will override the default agent for custom_task, which means propeller will send the request to this agent. - - custom_task: custom_agent +plugins: + agent-service: + # By default, all requests will be sent to the default agent. + defaultAgent: + endpoint: "dns:///flyteagent.flyte.svc.cluster.local:8000" + insecure: true + timeouts: + # CreateTask, GetTask and DeleteTask are for async agents. + # ExecuteTaskSync is for sync agents. 
+ CreateTask: 5s + GetTask: 5s + DeleteTask: 5s + ExecuteTaskSync: 10s + defaultTimeout: 10s + agents: + custom_agent: + endpoint: "dns:///custom-flyteagent.flyte.svc.cluster.local:8000" + insecure: false + defaultServiceConfig: '{"loadBalancingConfig": [{"round_robin":{}}]}' + timeouts: + GetTask: 5s + defaultTimeout: 10s + agentForTaskTypes: + # It will override the default agent for custom_task, which means propeller will send the request to this agent. + - custom_task: custom_agent ``` diff --git a/docs/user_guide/getting_started_with_workflow_development/flyte_project_components.md b/docs/user_guide/getting_started_with_workflow_development/flyte_project_components.md index 47821c5201..4b6ce43d68 100644 --- a/docs/user_guide/getting_started_with_workflow_development/flyte_project_components.md +++ b/docs/user_guide/getting_started_with_workflow_development/flyte_project_components.md @@ -33,8 +33,8 @@ manage your project's Python requirements. ````{dropdown} See requirements.txt -```{rli} https://raw.githubusercontent.com/flyteorg/flytekit-python-template/main/basic-template-imagespec/%7B%7Bcookiecutter.project_name%7D%7D/requirements.txt -:caption: requirements.txt +```{code-block} bash +flytekit>=1.5.0 ``` ```` diff --git a/docs/user_guide/productionizing/customizing_task_resources.md b/docs/user_guide/productionizing/customizing_task_resources.md index 6ba07a604b..da885ec583 100644 --- a/docs/user_guide/productionizing/customizing_task_resources.md +++ b/docs/user_guide/productionizing/customizing_task_resources.md @@ -36,35 +36,35 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte Import the dependencies: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :lines: 1-3 ``` Define a task and configure the resources to be allocated to it: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :pyobject: count_unique_numbers ``` Define a task that computes the square of a number: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :pyobject: square ``` You can use the tasks decorated with memory and storage hints like regular tasks in a workflow. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :pyobject: my_workflow ``` You can execute the workflow locally. 
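As a reading aid alongside this hunk: a minimal sketch of the resource-request pattern these docs describe, using flytekit's `Resources` on a task. The task name and the cpu/mem values are hypothetical placeholders, not the flytesnacks snippet referenced by the includes.

```python
from flytekit import Resources, task, workflow


@task(requests=Resources(cpu="1", mem="200Mi"), limits=Resources(cpu="2", mem="400Mi"))
def double(x: int) -> int:
    # Hypothetical task; the resource values above are illustrative only.
    return x * 2


@workflow
def resource_wf(x: int = 3) -> int:
    return double(x=x)


if __name__ == "__main__":
    # Local execution ignores the resource hints; they take effect on a cluster.
    print(resource_wf(x=3))
```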
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :lines: 32-34 ``` @@ -82,7 +82,7 @@ Let's understand how the resources can be initialized with an example. Import the dependencies. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :lines: 38-40 ``` @@ -90,28 +90,28 @@ Import the dependencies. Define a task and configure the resources to be allocated to it. You can use tasks decorated with memory and storage hints like regular tasks in a workflow. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :pyobject: count_unique_numbers ``` Define a task that computes the square of a number: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :pyobject: square_1 ``` The `with_overrides` method overrides the old resource allocations: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :pyobject: my_pipeline ``` You can execute the workflow locally: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/customizing_resources.py +```{literalinclude} /examples/productionizing/productionizing/customizing_resources.py :caption: productionizing/customizing_resources.py :lines: 65-67 ``` diff --git a/docs/user_guide/productionizing/reference_launch_plans.md b/docs/user_guide/productionizing/reference_launch_plans.md index bce75e4681..79b4142baf 100644 --- a/docs/user_guide/productionizing/reference_launch_plans.md +++ b/docs/user_guide/productionizing/reference_launch_plans.md @@ -16,7 +16,7 @@ Reference launch plans cannot be run locally. You must mock them out. To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. 
``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/reference_launch_plan.py +```{literalinclude} /examples/productionizing/productionizing/reference_launch_plan.py :caption: productionizing/reference_launch_plan.py :lines: 1-36 ``` diff --git a/docs/user_guide/productionizing/reference_tasks.md b/docs/user_guide/productionizing/reference_tasks.md index d91ecd4bbc..0d611608a6 100644 --- a/docs/user_guide/productionizing/reference_tasks.md +++ b/docs/user_guide/productionizing/reference_tasks.md @@ -16,7 +16,7 @@ Reference tasks cannot be run locally. You must mock them out. To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/reference_task.py +```{literalinclude} /examples/productionizing/productionizing/reference_task.py :caption: productionizing/reference_task.py :lines: 1-36 ``` diff --git a/docs/user_guide/productionizing/schedules.md b/docs/user_guide/productionizing/schedules.md index fa0a6eedb0..6d28906073 100644 --- a/docs/user_guide/productionizing/schedules.md +++ b/docs/user_guide/productionizing/schedules.md @@ -25,7 +25,7 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte Consider the following example workflow: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/lp_schedules.py +```{literalinclude} /examples/productionizing/productionizing/lp_schedules.py :caption: productionizing/lp_schedules.py :lines: 1-14 ``` @@ -39,7 +39,7 @@ The `date_formatter_wf` workflow can be scheduled using either the `CronSchedule [Cron](https://en.wikipedia.org/wiki/Cron) expression strings use this {ref}`syntax `. An incorrect cron schedule expression would lead to failure in triggering the schedule. 
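For context while reading this hunk: a rough sketch of scheduling a launch plan with `CronSchedule`, with hypothetical names and a made-up cron string rather than the flytesnacks file itself.

```python
from datetime import datetime

from flytekit import CronSchedule, LaunchPlan, task, workflow


@task
def format_date(run_date: datetime) -> str:
    return run_date.strftime("%Y-%m-%d")


@workflow
def date_wf(kickoff_time: datetime) -> str:
    return format_date(run_date=kickoff_time)


# Fires every 10 minutes; kickoff_time_input_arg names the workflow input that
# receives the scheduled kickoff time.
cron_lp = LaunchPlan.get_or_create(
    name="date_wf_cron_lp",
    workflow=date_wf,
    schedule=CronSchedule(schedule="*/10 * * * *", kickoff_time_input_arg="kickoff_time"),
)
```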
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/lp_schedules.py +```{literalinclude} /examples/productionizing/productionizing/lp_schedules.py :caption: productionizing/lp_schedules.py :lines: 17-29 ``` @@ -54,7 +54,7 @@ If you prefer to use an interval rather than a cron scheduler to schedule your w Here's an example: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/lp_schedules.py +```{literalinclude} /examples/productionizing/productionizing/lp_schedules.py :caption: productionizing/lp_schedules.py :lines: 34-57 ``` diff --git a/docs/user_guide/productionizing/secrets.md b/docs/user_guide/productionizing/secrets.md index 7eba15e653..538dc5d5ad 100644 --- a/docs/user_guide/productionizing/secrets.md +++ b/docs/user_guide/productionizing/secrets.md @@ -62,7 +62,7 @@ Once you've defined a secret on the Flyte backend, `flytekit` exposes a class called {py:class}`~flytekit.Secret`s, which allows you to request a secret from the configured secret manager: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/use_secrets.py +```{literalinclude} /examples/productionizing/productionizing/use_secrets.py :caption: productionizing/use_secrets.py :lines: 1-6, 49-53 ``` @@ -76,7 +76,7 @@ In the code below we specify two variables, `SECRET_GROUP` and `SECRET_NAME`, which maps onto the `user-info` secret that we created with `kubectl` above, with a key called `user_secret`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/use_secrets.py +```{literalinclude} /examples/productionizing/productionizing/use_secrets.py :caption: productionizing/use_secrets.py :lines: 66-67 ``` @@ -92,7 +92,7 @@ invoking the {py:func}`flytekit.current_context` function, as shown below. At runtime, flytekit looks inside the task pod for an environment variable or a mounted file with a predefined name/path and loads the value. 
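A minimal sketch of that flow, assuming the `user-info` secret group with a `user_secret` key has already been created on the backend; the include below shows the actual flytesnacks task.

```python
from flytekit import Secret, current_context, task

SECRET_GROUP = "user-info"
SECRET_NAME = "user_secret"


@task(secret_requests=[Secret(group=SECRET_GROUP, key=SECRET_NAME)])
def secret_task() -> int:
    # flytekit resolves the value from the mounted file or environment variable at runtime.
    secret_val = current_context().secrets.get(SECRET_GROUP, SECRET_NAME)
    # Avoid returning or logging real secret values; report only its length here.
    return len(secret_val)
```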
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/use_secrets.py +```{literalinclude} /examples/productionizing/productionizing/use_secrets.py :caption: productionizing/use_secrets.py :pyobject: secret_task ``` @@ -127,14 +127,14 @@ the same secret: In this case, the secret group will be `user-info`, with three available secret keys: `user_secret`, `username`, and `password`: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/use_secrets.py +```{literalinclude} /examples/productionizing/productionizing/use_secrets.py :caption: productionizing/use_secrets.py :lines: 107-108 ``` The Secret structure allows passing two fields, matching the key and the group, as previously described: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/use_secrets.py +```{literalinclude} /examples/productionizing/productionizing/use_secrets.py :caption: productionizing/use_secrets.py :lines: 113-124 ``` @@ -155,14 +155,14 @@ In these scenarios you can specify the `mount_requirement=Secret.MountType.FILE` In the following example we force the mounting to be an environment variable: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/use_secrets.py +```{literalinclude} /examples/productionizing/productionizing/use_secrets.py :caption: productionizing/use_secrets.py :lines: 139-158 ``` These tasks can be used in your workflow as usual -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/use_secrets.py +```{literalinclude} /examples/productionizing/productionizing/use_secrets.py :caption: productionizing/use_secrets.py :pyobject: my_secret_workflow ``` @@ -172,7 +172,7 @@ These tasks can be used in your workflow as usual The simplest way to test secret accessibility is to export the secret as an environment variable. There are some helper methods available to do so: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/productionizing/productionizing/use_secrets.py +```{literalinclude} /examples/productionizing/productionizing/use_secrets.py :caption: productionizing/use_secrets.py :lines: 172-182 ``` diff --git a/docs/user_guide/testing/mocking_tasks.md b/docs/user_guide/testing/mocking_tasks.md index b95af69b14..eb1c396075 100644 --- a/docs/user_guide/testing/mocking_tasks.md +++ b/docs/user_guide/testing/mocking_tasks.md @@ -6,42 +6,42 @@ A lot of the tasks that you write you can run locally, but some of them you will To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. 
``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/testing/testing/mocking.py +```{literalinclude} /examples/testing/testing/mocking.py :caption: testing/mocking.py :lines: 1-6 ``` This is a generic SQL task (and is by default not hooked up to any datastore nor handled by any plugin), and must be mocked: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/testing/testing/mocking.py +```{literalinclude} /examples/testing/testing/mocking.py :caption: testing/mocking.py :lines: 10-16 ``` This is a task that can run locally: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/testing/testing/mocking.py +```{literalinclude} /examples/testing/testing/mocking.py :caption: testing/mocking.py :pyobject: t1 ``` Declare a workflow that chains these two tasks together. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/testing/testing/mocking.py +```{literalinclude} /examples/testing/testing/mocking.py :caption: testing/mocking.py :pyobject: my_wf ``` Without a mock, calling the workflow would typically raise an exception, but with the `task_mock` construct, which returns a `MagicMock` object, we can override the return value. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/testing/testing/mocking.py +```{literalinclude} /examples/testing/testing/mocking.py :caption: testing/mocking.py :pyobject: main_1 ``` There is another utility as well called `patch` which offers the same functionality, but in the traditional Python patching style, where the first argument is the `MagicMock` object. 
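To make the two styles concrete without resolving the include, here is a loose sketch modeled on the flytesnacks mocking example; the SQL task, query, and data below are hypothetical.

```python
import pandas as pd
from flytekit import SQLTask, kwtypes, workflow
from flytekit.testing import patch, task_mock
from flytekit.types.schema import FlyteSchema

# A generic SQL task is not bound to any datastore, so it must be mocked when
# the workflow is run locally.
sql = SQLTask(
    "my-query",
    query_template="SELECT * FROM tracks LIMIT {{ .inputs.limit }}",
    inputs=kwtypes(limit=int),
    outputs=kwtypes(results=FlyteSchema),
    task_config=None,
    task_type="sql",
)


@workflow
def wf(limit: int = 5) -> FlyteSchema:
    return sql(limit=limit)


def test_with_task_mock():
    # Context-manager style: the MagicMock's return value stands in for the task output.
    with task_mock(sql) as mock:
        mock.return_value = pd.DataFrame({"id": [1, 2, 3]})
        wf(limit=5)


@patch(sql)
def test_with_patch(mock_sql):
    # Traditional patching style: the MagicMock is passed as the first argument.
    mock_sql.return_value = pd.DataFrame({"id": [1, 2, 3]})
    wf(limit=5)
```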
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/testing/testing/mocking.py +```{literalinclude} /examples/testing/testing/mocking.py :caption: testing/mocking.py :lines: 45-56 ``` diff --git a/flyteadmin/.golangci.yml b/flyteadmin/.golangci.yml index cd180b89d1..b3e4f05997 100644 --- a/flyteadmin/.golangci.yml +++ b/flyteadmin/.golangci.yml @@ -3,30 +3,25 @@ run: # because we're skipping TLS verification - for now - cmd/entrypoints/serve.go - pkg/async/messages/sqs.go - linters: disable-all: true enable: - - deadcode - errcheck - - gas + - gosec - gci - goconst - goimports - - golint - gosimple - govet - ineffassign - misspell - nakedret - staticcheck - - structcheck - typecheck - unconvert - unparam - unused - - varcheck - + - protogetter linters-settings: gci: custom-order: true @@ -35,6 +30,8 @@ linters-settings: - default - prefix(github.com/flyteorg) skip-generated: true + goconst: + ignore-tests: true issues: exclude-rules: - path: pkg/workflowengine/impl/prepare_execution.go diff --git a/flyteadmin/auth/authzserver/claims_verifier_test.go b/flyteadmin/auth/authzserver/claims_verifier_test.go index dca3cf6e22..568b248ccd 100644 --- a/flyteadmin/auth/authzserver/claims_verifier_test.go +++ b/flyteadmin/auth/authzserver/claims_verifier_test.go @@ -30,7 +30,7 @@ func Test_verifyClaims(t *testing.T) { assert.Equal(t, "my-client", identityCtx.AppID()) assert.Equal(t, "123", identityCtx.UserID()) assert.Equal(t, "https://myserver", identityCtx.Audience()) - assert.Equal(t, "byhsu@linkedin.com", identityCtx.UserInfo().Email) + assert.Equal(t, "byhsu@linkedin.com", identityCtx.UserInfo().GetEmail()) }) t.Run("Multiple audience", func(t *testing.T) { diff --git a/flyteadmin/auth/authzserver/metadata_provider_test.go b/flyteadmin/auth/authzserver/metadata_provider_test.go index c8f92fe8cc..c02825360f 100644 --- a/flyteadmin/auth/authzserver/metadata_provider_test.go +++ b/flyteadmin/auth/authzserver/metadata_provider_test.go @@ -35,10 +35,10 @@ func TestOAuth2MetadataProvider_FlyteClient(t *testing.T) { ctx := context.Background() resp, err := provider.GetPublicClientConfig(ctx, &service.PublicClientAuthConfigRequest{}) assert.NoError(t, err) - assert.Equal(t, "my-client", resp.ClientId) - assert.Equal(t, "client/", resp.RedirectUri) - assert.Equal(t, []string{"all"}, resp.Scopes) - assert.Equal(t, "http://dummyServer", resp.Audience) + assert.Equal(t, "my-client", resp.GetClientId()) + assert.Equal(t, "client/", resp.GetRedirectUri()) + assert.Equal(t, []string{"all"}, resp.GetScopes()) + assert.Equal(t, "http://dummyServer", resp.GetAudience()) } func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) { @@ -50,7 +50,7 @@ func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) { ctx := context.Background() resp, err := provider.GetOAuth2Metadata(ctx, &service.OAuth2MetadataRequest{}) assert.NoError(t, err) - assert.Equal(t, "https://issuer/", resp.Issuer) + assert.Equal(t, "https://issuer/", resp.GetIssuer()) }) var issuer string @@ -91,7 +91,7 @@ func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) { ctx := context.Background() resp, err := provider.GetOAuth2Metadata(ctx, &service.OAuth2MetadataRequest{}) assert.NoError(t, err) - assert.Equal(t, "https://dev-14186422.okta.com", resp.Issuer) + assert.Equal(t, "https://dev-14186422.okta.com", resp.GetIssuer()) }) t.Run("External AuthServer fallback url", func(t *testing.T) { @@ -110,7 +110,7 @@ func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) { ctx 
:= context.Background() resp, err := provider.GetOAuth2Metadata(ctx, &service.OAuth2MetadataRequest{}) assert.NoError(t, err) - assert.Equal(t, "https://dev-14186422.okta.com", resp.Issuer) + assert.Equal(t, "https://dev-14186422.okta.com", resp.GetIssuer()) }) } diff --git a/flyteadmin/auth/authzserver/resource_server.go b/flyteadmin/auth/authzserver/resource_server.go index 59b984365d..917e0fcfaf 100644 --- a/flyteadmin/auth/authzserver/resource_server.go +++ b/flyteadmin/auth/authzserver/resource_server.go @@ -116,7 +116,7 @@ func getJwksForIssuer(ctx context.Context, issuerBaseURL url.URL, cfg authConfig return nil, fmt.Errorf("failed to decode provider discovery object: %v", err) } - return oidc.NewRemoteKeySet(oidc.ClientContext(ctx, httpClient), p.JwksUri), nil + return oidc.NewRemoteKeySet(oidc.ClientContext(ctx, httpClient), p.GetJwksUri()), nil } // NewOAuth2ResourceServer initializes a new OAuth2ResourceServer. diff --git a/flyteadmin/auth/authzserver/resource_server_test.go b/flyteadmin/auth/authzserver/resource_server_test.go index 6ee94184ed..9541e73ccd 100644 --- a/flyteadmin/auth/authzserver/resource_server_test.go +++ b/flyteadmin/auth/authzserver/resource_server_test.go @@ -61,7 +61,7 @@ func newMockResourceServer(t testing.TB, publicKey rsa.PublicKey) (resourceServe } w.Header().Set("Content-Type", "application/json") - _, err = io.WriteString(w, string(raw)) + _, err = io.Writer.Write(w, raw) if !assert.NoError(t, err) { t.FailNow() diff --git a/flyteadmin/auth/cookie.go b/flyteadmin/auth/cookie.go index 2470220d24..456eeb8580 100644 --- a/flyteadmin/auth/cookie.go +++ b/flyteadmin/auth/cookie.go @@ -12,6 +12,7 @@ import ( "github.com/gorilla/securecookie" "github.com/flyteorg/flyte/flyteadmin/auth/interfaces" + "github.com/flyteorg/flyte/flyteadmin/pkg/config" "github.com/flyteorg/flyte/flytestdlib/errors" "github.com/flyteorg/flyte/flytestdlib/logger" ) @@ -68,6 +69,8 @@ func NewSecureCookie(cookieName, value string, hashKey, blockKey []byte, domain Value: encoded, Domain: domain, SameSite: sameSiteMode, + HttpOnly: true, + Secure: !config.GetConfig().Security.InsecureCookieHeader, }, nil } @@ -126,6 +129,7 @@ func NewCsrfCookie() http.Cookie { Value: csrfStateToken, SameSite: http.SameSiteLaxMode, HttpOnly: true, + Secure: !config.GetConfig().Security.InsecureCookieHeader, } } @@ -164,6 +168,7 @@ func NewRedirectCookie(ctx context.Context, redirectURL string) *http.Cookie { Value: urlObj.String(), SameSite: http.SameSiteLaxMode, HttpOnly: true, + Secure: !config.GetConfig().Security.InsecureCookieHeader, } } diff --git a/flyteadmin/auth/cookie_manager.go b/flyteadmin/auth/cookie_manager.go index ce360c9d3a..8a23272d01 100644 --- a/flyteadmin/auth/cookie_manager.go +++ b/flyteadmin/auth/cookie_manager.go @@ -11,6 +11,7 @@ import ( "golang.org/x/oauth2" "github.com/flyteorg/flyte/flyteadmin/auth/config" + serverConfig "github.com/flyteorg/flyte/flyteadmin/pkg/config" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/errors" "github.com/flyteorg/flyte/flytestdlib/logger" @@ -218,6 +219,7 @@ func (c *CookieManager) getLogoutCookie(name string) *http.Cookie { Domain: c.domain, MaxAge: 0, HttpOnly: true, + Secure: !serverConfig.GetConfig().Security.InsecureCookieHeader, Expires: time.Now().Add(-1 * time.Hour), } } diff --git a/flyteadmin/auth/cookie_manager_test.go b/flyteadmin/auth/cookie_manager_test.go index 09d8468e83..444056ba8c 100644 --- a/flyteadmin/auth/cookie_manager_test.go +++ 
b/flyteadmin/auth/cookie_manager_test.go @@ -16,6 +16,7 @@ import ( "golang.org/x/oauth2" "github.com/flyteorg/flyte/flyteadmin/auth/config" + serverConfig "github.com/flyteorg/flyte/flyteadmin/pkg/config" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" ) @@ -199,34 +200,53 @@ func TestCookieManager(t *testing.T) { assert.EqualError(t, err, "[EMPTY_OAUTH_TOKEN] Error reading existing secure cookie [flyte_idt]. Error: [SECURE_COOKIE_ERROR] Error reading secure cookie flyte_idt, caused by: securecookie: error - caused by: crypto/aes: invalid key size 75") }) - t.Run("delete_cookies", func(t *testing.T) { - w := httptest.NewRecorder() - - manager.DeleteCookies(ctx, w) - - cookies := w.Result().Cookies() - require.Equal(t, 5, len(cookies)) - - assert.True(t, time.Now().After(cookies[0].Expires)) - assert.Equal(t, cookieSetting.Domain, cookies[0].Domain) - assert.Equal(t, accessTokenCookieName, cookies[0].Name) - - assert.True(t, time.Now().After(cookies[1].Expires)) - assert.Equal(t, cookieSetting.Domain, cookies[1].Domain) - assert.Equal(t, accessTokenCookieNameSplitFirst, cookies[1].Name) - - assert.True(t, time.Now().After(cookies[2].Expires)) - assert.Equal(t, cookieSetting.Domain, cookies[2].Domain) - assert.Equal(t, accessTokenCookieNameSplitSecond, cookies[2].Name) - - assert.True(t, time.Now().After(cookies[3].Expires)) - assert.Equal(t, cookieSetting.Domain, cookies[3].Domain) - assert.Equal(t, refreshTokenCookieName, cookies[3].Name) + tests := []struct { + name string + insecureCookieHeader bool + expectedSecure bool + }{ + { + name: "secure_cookies", + insecureCookieHeader: false, + expectedSecure: true, + }, + { + name: "insecure_cookies", + insecureCookieHeader: true, + expectedSecure: false, + }, + } - assert.True(t, time.Now().After(cookies[4].Expires)) - assert.Equal(t, cookieSetting.Domain, cookies[4].Domain) - assert.Equal(t, idTokenCookieName, cookies[4].Name) - }) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + w := httptest.NewRecorder() + + serverConfig.SetConfig(&serverConfig.ServerConfig{ + Security: serverConfig.ServerSecurityOptions{ + InsecureCookieHeader: tt.insecureCookieHeader, + }, + }) + + manager.DeleteCookies(ctx, w) + + cookies := w.Result().Cookies() + require.Equal(t, 5, len(cookies)) + + // Check secure flag for each cookie + for _, cookie := range cookies { + assert.Equal(t, tt.expectedSecure, cookie.Secure) + assert.True(t, time.Now().After(cookie.Expires)) + assert.Equal(t, cookieSetting.Domain, cookie.Domain) + } + + // Check cookie names + assert.Equal(t, accessTokenCookieName, cookies[0].Name) + assert.Equal(t, accessTokenCookieNameSplitFirst, cookies[1].Name) + assert.Equal(t, accessTokenCookieNameSplitSecond, cookies[2].Name) + assert.Equal(t, refreshTokenCookieName, cookies[3].Name) + assert.Equal(t, idTokenCookieName, cookies[4].Name) + }) + } t.Run("get_http_same_site_policy", func(t *testing.T) { manager.sameSitePolicy = config.SameSiteLaxMode diff --git a/flyteadmin/auth/cookie_test.go b/flyteadmin/auth/cookie_test.go index a5c58ad2ff..1134e957dc 100644 --- a/flyteadmin/auth/cookie_test.go +++ b/flyteadmin/auth/cookie_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/base64" - "fmt" "net/http" "net/url" "testing" @@ -14,6 +13,7 @@ import ( "github.com/flyteorg/flyte/flyteadmin/auth/config" "github.com/flyteorg/flyte/flyteadmin/auth/interfaces/mocks" + serverConfig "github.com/flyteorg/flyte/flyteadmin/pkg/config" stdConfig "github.com/flyteorg/flyte/flytestdlib/config" ) @@ -26,22 +26,53 @@ func 
mustParseURL(t testing.TB, u string) url.URL { return *res } -// This function can also be called locally to generate new keys func TestSecureCookieLifecycle(t *testing.T) { - hashKey := securecookie.GenerateRandomKey(64) - assert.True(t, base64.RawStdEncoding.EncodeToString(hashKey) != "") - - blockKey := securecookie.GenerateRandomKey(32) - assert.True(t, base64.RawStdEncoding.EncodeToString(blockKey) != "") - fmt.Printf("Hash key: |%s| Block key: |%s|\n", - base64.RawStdEncoding.EncodeToString(hashKey), base64.RawStdEncoding.EncodeToString(blockKey)) - - cookie, err := NewSecureCookie("choc", "chip", hashKey, blockKey, "localhost", http.SameSiteLaxMode) - assert.NoError(t, err) + tests := []struct { + name string + insecureCookieHeader bool + expectedSecure bool + }{ + { + name: "secure_cookie", + insecureCookieHeader: false, + expectedSecure: true, + }, + { + name: "insecure_cookie", + insecureCookieHeader: true, + expectedSecure: false, + }, + } - value, err := ReadSecureCookie(context.Background(), cookie, hashKey, blockKey) - assert.NoError(t, err) - assert.Equal(t, "chip", value) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Generate hash and block keys for secure cookie + hashKey := securecookie.GenerateRandomKey(64) + assert.True(t, base64.RawStdEncoding.EncodeToString(hashKey) != "") + + blockKey := securecookie.GenerateRandomKey(32) + assert.True(t, base64.RawStdEncoding.EncodeToString(blockKey) != "") + + // Set up server configuration with insecureCookieHeader option + serverConfig.SetConfig(&serverConfig.ServerConfig{ + Security: serverConfig.ServerSecurityOptions{ + InsecureCookieHeader: tt.insecureCookieHeader, + }, + }) + + // Create a secure cookie + cookie, err := NewSecureCookie("choc", "chip", hashKey, blockKey, "localhost", http.SameSiteLaxMode) + assert.NoError(t, err) + + // Validate the Secure attribute of the cookie + assert.Equal(t, tt.expectedSecure, cookie.Secure) + + // Read and validate the secure cookie value + value, err := ReadSecureCookie(context.Background(), cookie, hashKey, blockKey) + assert.NoError(t, err) + assert.Equal(t, "chip", value) + }) + } } func TestNewCsrfToken(t *testing.T) { @@ -50,9 +81,41 @@ func TestNewCsrfToken(t *testing.T) { } func TestNewCsrfCookie(t *testing.T) { - cookie := NewCsrfCookie() - assert.Equal(t, "flyte_csrf_state", cookie.Name) - assert.True(t, cookie.HttpOnly) + tests := []struct { + name string + insecureCookieHeader bool + expectedSecure bool + }{ + { + name: "secure_csrf_cookie", + insecureCookieHeader: false, + expectedSecure: true, + }, + { + name: "insecure_csrf_cookie", + insecureCookieHeader: true, + expectedSecure: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set up server configuration with insecureCookieHeader option + serverConfig.SetConfig(&serverConfig.ServerConfig{ + Security: serverConfig.ServerSecurityOptions{ + InsecureCookieHeader: tt.insecureCookieHeader, + }, + }) + + // Generate CSRF cookie + cookie := NewCsrfCookie() + + // Validate CSRF cookie properties + assert.Equal(t, "flyte_csrf_state", cookie.Name) + assert.True(t, cookie.HttpOnly) + assert.Equal(t, tt.expectedSecure, cookie.Secure) + }) + } } func TestHashCsrfState(t *testing.T) { @@ -121,6 +184,36 @@ func TestNewRedirectCookie(t *testing.T) { assert.NotNil(t, cookie) assert.Equal(t, http.SameSiteLaxMode, cookie.SameSite) }) + + tests := []struct { + name string + insecureCookieHeader bool + expectedSecure bool + }{ + { + name: "secure_cookies", + 
insecureCookieHeader: false, + expectedSecure: true, + }, + { + name: "insecure_cookies", + insecureCookieHeader: true, + expectedSecure: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + serverConfig.SetConfig(&serverConfig.ServerConfig{ + Security: serverConfig.ServerSecurityOptions{ + InsecureCookieHeader: tt.insecureCookieHeader, + }, + }) + ctx := context.Background() + cookie := NewRedirectCookie(ctx, "http://www.example.com/postLogin") + assert.NotNil(t, cookie) + assert.Equal(t, cookie.Secure, tt.expectedSecure) + }) + } } func TestGetAuthFlowEndRedirect(t *testing.T) { diff --git a/flyteadmin/auth/handler_utils.go b/flyteadmin/auth/handler_utils.go index a6b4031ca8..dd7a97d1af 100644 --- a/flyteadmin/auth/handler_utils.go +++ b/flyteadmin/auth/handler_utils.go @@ -168,6 +168,7 @@ func GetRedirectURLAllowed(ctx context.Context, urlRedirectParam string, cfg *co } logger.Debugf(ctx, "validating whether redirect url: %s is authorized", redirectURL) for _, authorizedURI := range cfg.AuthorizedURIs { + authorizedURI := authorizedURI if isAuthorizedRedirectURL(redirectURL, &authorizedURI.URL) { logger.Debugf(ctx, "authorizing redirect url: %s against authorized uri: %s", redirectURL.String(), authorizedURI.String()) return true diff --git a/flyteadmin/auth/handlers.go b/flyteadmin/auth/handlers.go index d8bc626652..002744fbd1 100644 --- a/flyteadmin/auth/handlers.go +++ b/flyteadmin/auth/handlers.go @@ -524,8 +524,8 @@ func GetUserInfoForwardResponseHandler() UserInfoForwardResponseHandler { return func(ctx context.Context, w http.ResponseWriter, m proto.Message) error { info, ok := m.(*service.UserInfoResponse) if ok { - if info.AdditionalClaims != nil { - for k, v := range info.AdditionalClaims.GetFields() { + if info.GetAdditionalClaims() != nil { + for k, v := range info.GetAdditionalClaims().GetFields() { jsonBytes, err := v.MarshalJSON() if err != nil { logger.Warningf(ctx, "failed to marshal claim [%s] to json: %v", k, err) @@ -535,7 +535,7 @@ func GetUserInfoForwardResponseHandler() UserInfoForwardResponseHandler { w.Header().Set(header, string(jsonBytes)) } } - w.Header().Set("X-User-Subject", info.Subject) + w.Header().Set("X-User-Subject", info.GetSubject()) } return nil } diff --git a/flyteadmin/auth/identity_context.go b/flyteadmin/auth/identity_context.go index 05889f7537..ab30088f01 100644 --- a/flyteadmin/auth/identity_context.go +++ b/flyteadmin/auth/identity_context.go @@ -103,7 +103,7 @@ func NewIdentityContext(audience, userID, appID string, authenticatedAt time.Tim userInfo = &service.UserInfoResponse{} } - if len(userInfo.Subject) == 0 { + if len(userInfo.GetSubject()) == 0 { userInfo.Subject = userID } diff --git a/flyteadmin/auth/identity_context_test.go b/flyteadmin/auth/identity_context_test.go index 1e72042be0..fb339027a9 100644 --- a/flyteadmin/auth/identity_context_test.go +++ b/flyteadmin/auth/identity_context_test.go @@ -22,7 +22,7 @@ func TestGetClaims(t *testing.T) { assert.NoError(t, err) assert.EqualValues(t, claims, withClaimsCtx.Claims()) - assert.NotEmpty(t, withClaimsCtx.UserInfo().AdditionalClaims) + assert.NotEmpty(t, withClaimsCtx.UserInfo().GetAdditionalClaims()) } func TestWithExecutionUserIdentifier(t *testing.T) { diff --git a/flyteadmin/auth/init_secrets.go b/flyteadmin/auth/init_secrets.go index 6e3d4a3078..fa964c57e9 100644 --- a/flyteadmin/auth/init_secrets.go +++ b/flyteadmin/auth/init_secrets.go @@ -8,7 +8,6 @@ import ( "encoding/base64" "encoding/pem" "fmt" - "io/ioutil" "os" "path/filepath" @@ -78,21 
+77,21 @@ type SecretsSet struct { } func writeSecrets(ctx context.Context, secrets SecretsSet, path string) error { - err := ioutil.WriteFile(filepath.Join(path, config.SecretNameClaimSymmetricKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.TokenHashKey)), os.ModePerm) + err := os.WriteFile(filepath.Join(path, config.SecretNameClaimSymmetricKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.TokenHashKey)), os.ModePerm) // #nosec G306 if err != nil { return fmt.Errorf("failed to persist token hash key. Error: %w", err) } logger.Infof(ctx, "wrote %v", config.SecretNameClaimSymmetricKey) - err = ioutil.WriteFile(filepath.Join(path, config.SecretNameCookieHashKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.CookieHashKey)), os.ModePerm) + err = os.WriteFile(filepath.Join(path, config.SecretNameCookieHashKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.CookieHashKey)), os.ModePerm) // #nosec G306 if err != nil { return fmt.Errorf("failed to persist cookie hash key. Error: %w", err) } logger.Infof(ctx, "wrote %v", config.SecretNameCookieHashKey) - err = ioutil.WriteFile(filepath.Join(path, config.SecretNameCookieBlockKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.CookieBlockKey)), os.ModePerm) + err = os.WriteFile(filepath.Join(path, config.SecretNameCookieBlockKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.CookieBlockKey)), os.ModePerm) // #nosec G306 if err != nil { return fmt.Errorf("failed to persist cookie block key. Error: %w", err) } diff --git a/flyteadmin/dataproxy/service.go b/flyteadmin/dataproxy/service.go index c02fa3699f..27d03e3eda 100644 --- a/flyteadmin/dataproxy/service.go +++ b/flyteadmin/dataproxy/service.go @@ -49,22 +49,22 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp // If it exists, and a hash was provided, then check if it matches. If it matches, then proceed as normal otherwise fail. // If it doesn't exist, then proceed as normal. - if len(req.Project) == 0 || len(req.Domain) == 0 { - logger.Infof(ctx, "project and domain are required parameters. Project [%v]. Domain [%v]", req.Project, req.Domain) + if len(req.GetProject()) == 0 || len(req.GetDomain()) == 0 { + logger.Infof(ctx, "project and domain are required parameters. Project [%v]. Domain [%v]", req.GetProject(), req.GetDomain()) return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "project and domain are required parameters") } // At least one of the hash or manually given prefix must be provided. - if len(req.FilenameRoot) == 0 && len(req.ContentMd5) == 0 { - logger.Infof(ctx, "content_md5 or filename_root is a required parameter. FilenameRoot [%v], ContentMD5 [%v]", req.FilenameRoot, req.ContentMd5) + if len(req.GetFilenameRoot()) == 0 && len(req.GetContentMd5()) == 0 { + logger.Infof(ctx, "content_md5 or filename_root is a required parameter. FilenameRoot [%v], ContentMD5 [%v]", req.GetFilenameRoot(), req.GetContentMd5()) return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "content_md5 or filename_root is a required parameter") } // If we fall in here, that means that the full path is deterministic and we should check for existence. 
- if len(req.Filename) > 0 && len(req.FilenameRoot) > 0 { + if len(req.GetFilename()) > 0 && len(req.GetFilenameRoot()) > 0 { knownLocation, err := createStorageLocation(ctx, s.dataStore, s.cfg.Upload, - req.Org, req.Project, req.Domain, req.FilenameRoot, req.Filename) + req.GetOrg(), req.GetProject(), req.GetDomain(), req.GetFilenameRoot(), req.GetFilename()) if err != nil { logger.Errorf(ctx, "failed to create storage location. Error %v", err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create storage location, Error: %v", err) @@ -78,15 +78,15 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp // Basically if the file exists, then error unless the user also provided a hash and it matches. // Keep in mind this is just a best effort attempt. There can easily be race conditions where two users // request the same file at the same time and one of the writes is lost. - if len(req.ContentMd5) == 0 { + if len(req.GetContentMd5()) == 0 { return nil, errors.NewFlyteAdminErrorf(codes.AlreadyExists, "file already exists at location [%v], specify a matching hash if you wish to rewrite", knownLocation) } - base64Digest := base64.StdEncoding.EncodeToString(req.ContentMd5) + base64Digest := base64.StdEncoding.EncodeToString(req.GetContentMd5()) if len(metadata.ContentMD5()) == 0 { // For backward compatibility, dataproxy assumes that the Etag exists if ContentMD5 is not in the metadata. // Data proxy won't allow people to overwrite the file if both the Etag and the ContentMD5 do not exist. - hexDigest := hex.EncodeToString(req.ContentMd5) - base32Digest := base32.StdEncoding.EncodeToString(req.ContentMd5) + hexDigest := hex.EncodeToString(req.GetContentMd5()) + base32Digest := base32.StdEncoding.EncodeToString(req.GetContentMd5()) if hexDigest != metadata.Etag() && base32Digest != metadata.Etag() && base64Digest != metadata.Etag() { logger.Errorf(ctx, "File already exists at location [%v] but hashes do not match", knownLocation) return nil, errors.NewFlyteAdminErrorf(codes.AlreadyExists, "file already exists at location [%v], specify a matching hash if you wish to rewrite", knownLocation) @@ -99,7 +99,7 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp } } - if expiresIn := req.ExpiresIn; expiresIn != nil { + if expiresIn := req.GetExpiresIn(); expiresIn != nil { if !expiresIn.IsValid() { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "expiresIn [%v] is invalid", expiresIn) } @@ -112,21 +112,21 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp req.ExpiresIn = durationpb.New(s.cfg.Upload.MaxExpiresIn.Duration) } - if len(req.Filename) == 0 { + if len(req.GetFilename()) == 0 { req.Filename = rand.String(s.cfg.Upload.DefaultFileNameLength) } - base64digestMD5 := base64.StdEncoding.EncodeToString(req.ContentMd5) + base64digestMD5 := base64.StdEncoding.EncodeToString(req.GetContentMd5()) var prefix string - if len(req.FilenameRoot) > 0 { - prefix = req.FilenameRoot + if len(req.GetFilenameRoot()) > 0 { + prefix = req.GetFilenameRoot() } else { // url safe base32 encoding - prefix = base32.StdEncoding.EncodeToString(req.ContentMd5) + prefix = base32.StdEncoding.EncodeToString(req.GetContentMd5()) } storagePath, err := createStorageLocation(ctx, s.dataStore, s.cfg.Upload, - req.Org, req.Project, req.Domain, prefix, req.Filename) + req.GetOrg(), req.GetProject(), req.GetDomain(), prefix, req.GetFilename()) if err != nil { logger.Errorf(ctx, "failed to create 
shardedStorageLocation. Error %v", err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create shardedStorageLocation, Error: %v", err) @@ -134,9 +134,9 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp resp, err := s.dataStore.CreateSignedURL(ctx, storagePath, storage.SignedURLProperties{ Scope: stow.ClientMethodPut, - ExpiresIn: req.ExpiresIn.AsDuration(), + ExpiresIn: req.GetExpiresIn().AsDuration(), ContentMD5: base64digestMD5, - AddContentMD5Metadata: req.AddContentMd5Metadata, + AddContentMD5Metadata: req.GetAddContentMd5Metadata(), }) if err != nil { @@ -147,7 +147,7 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp return &service.CreateUploadLocationResponse{ SignedUrl: resp.URL.String(), NativeUrl: storagePath.String(), - ExpiresAt: timestamppb.New(time.Now().Add(req.ExpiresIn.AsDuration())), + ExpiresAt: timestamppb.New(time.Now().Add(req.GetExpiresIn().AsDuration())), Headers: resp.RequiredRequestHeaders, }, nil } @@ -172,7 +172,7 @@ func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDown switch req.GetArtifactType() { case service.ArtifactType_ARTIFACT_TYPE_DECK: - nativeURL = node.Closure.DeckUri + nativeURL = node.GetClosure().GetDeckUri() } } else { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "unsupported source [%v]", reflect.TypeOf(req.GetSource())) @@ -194,7 +194,7 @@ func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDown signedURLResp, err := s.dataStore.CreateSignedURL(ctx, ref, storage.SignedURLProperties{ Scope: stow.ClientMethodGet, - ExpiresIn: req.ExpiresIn.AsDuration(), + ExpiresIn: req.GetExpiresIn().AsDuration(), }) if err != nil { @@ -202,7 +202,7 @@ func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDown } u := []string{signedURLResp.URL.String()} - ts := timestamppb.New(time.Now().Add(req.ExpiresIn.AsDuration())) + ts := timestamppb.New(time.Now().Add(req.GetExpiresIn().AsDuration())) // return &service.CreateDownloadLinkResponse{ @@ -223,9 +223,9 @@ func (s Service) CreateDownloadLocation(ctx context.Context, req *service.Create return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "error while validating request: %v", err) } - resp, err := s.dataStore.CreateSignedURL(ctx, storage.DataReference(req.NativeUrl), storage.SignedURLProperties{ + resp, err := s.dataStore.CreateSignedURL(ctx, storage.DataReference(req.GetNativeUrl()), storage.SignedURLProperties{ Scope: stow.ClientMethodGet, - ExpiresIn: req.ExpiresIn.AsDuration(), + ExpiresIn: req.GetExpiresIn().AsDuration(), }) if err != nil { @@ -234,21 +234,21 @@ func (s Service) CreateDownloadLocation(ctx context.Context, req *service.Create return &service.CreateDownloadLocationResponse{ SignedUrl: resp.URL.String(), - ExpiresAt: timestamppb.New(time.Now().Add(req.ExpiresIn.AsDuration())), + ExpiresAt: timestamppb.New(time.Now().Add(req.GetExpiresIn().AsDuration())), }, nil } func (s Service) validateCreateDownloadLocationRequest(req *service.CreateDownloadLocationRequest) error { - validatedExpiresIn, err := validateDuration(req.ExpiresIn, s.cfg.Download.MaxExpiresIn.Duration) + validatedExpiresIn, err := validateDuration(req.GetExpiresIn(), s.cfg.Download.MaxExpiresIn.Duration) if err != nil { return fmt.Errorf("expiresIn is invalid. 
Error: %w", err) } req.ExpiresIn = validatedExpiresIn - if _, err := url.Parse(req.NativeUrl); err != nil { + if _, err := url.Parse(req.GetNativeUrl()); err != nil { return fmt.Errorf("failed to parse native_url [%v]", - req.NativeUrl) + req.GetNativeUrl()) } return nil @@ -275,7 +275,7 @@ func validateDuration(input *durationpb.Duration, maxAllowed time.Duration) (*du } func (s Service) validateCreateDownloadLinkRequest(req *service.CreateDownloadLinkRequest) (*service.CreateDownloadLinkRequest, error) { - validatedExpiresIn, err := validateDuration(req.ExpiresIn, s.cfg.Download.MaxExpiresIn.Duration) + validatedExpiresIn, err := validateDuration(req.GetExpiresIn(), s.cfg.Download.MaxExpiresIn.Duration) if err != nil { return nil, fmt.Errorf("expiresIn is invalid. Error: %w", err) } @@ -328,16 +328,16 @@ func (s Service) GetCompleteTaskExecutionID(ctx context.Context, taskExecID *cor taskExecs, err := s.taskExecutionManager.ListTaskExecutions(ctx, &admin.TaskExecutionListRequest{ NodeExecutionId: taskExecID.GetNodeExecutionId(), Limit: 1, - Filters: fmt.Sprintf("eq(retry_attempt,%s)", strconv.Itoa(int(taskExecID.RetryAttempt))), + Filters: fmt.Sprintf("eq(retry_attempt,%s)", strconv.Itoa(int(taskExecID.GetRetryAttempt()))), }) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "failed to list task executions [%v]. Error: %v", taskExecID, err) } - if len(taskExecs.TaskExecutions) == 0 { + if len(taskExecs.GetTaskExecutions()) == 0 { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "no task executions were listed [%v]. Error: %v", taskExecID, err) } - taskExec := taskExecs.TaskExecutions[0] - return taskExec.Id, nil + taskExec := taskExecs.GetTaskExecutions()[0] + return taskExec.GetId(), nil } func (s Service) GetTaskExecutionID(ctx context.Context, attempt int, nodeExecID *core.NodeExecutionIdentifier) (*core.TaskExecutionIdentifier, error) { @@ -349,11 +349,11 @@ func (s Service) GetTaskExecutionID(ctx context.Context, attempt int, nodeExecID if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "failed to list task executions [%v]. Error: %v", nodeExecID, err) } - if len(taskExecs.TaskExecutions) == 0 { + if len(taskExecs.GetTaskExecutions()) == 0 { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "no task executions were listed [%v]. Error: %v", nodeExecID, err) } - taskExec := taskExecs.TaskExecutions[0] - return taskExec.Id, nil + taskExec := taskExecs.GetTaskExecutions()[0] + return taskExec.GetId(), nil } func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID *core.NodeExecutionIdentifier, ioType common.ArtifactType, name string) ( @@ -368,9 +368,9 @@ func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID *core. var lm *core.LiteralMap if ioType == common.ArtifactTypeI { - lm = resp.FullInputs + lm = resp.GetFullInputs() } else if ioType == common.ArtifactTypeO { - lm = resp.FullOutputs + lm = resp.GetFullOutputs() } else { // Assume deck, and create a download link request dlRequest := service.CreateDownloadLinkRequest{ @@ -383,13 +383,13 @@ func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID *core. 
} return &service.GetDataResponse{ Data: &service.GetDataResponse_PreSignedUrls{ - PreSignedUrls: resp.PreSignedUrls, + PreSignedUrls: resp.GetPreSignedUrls(), }, }, nil } if name != "" { - if literal, ok := lm.Literals[name]; ok { + if literal, ok := lm.GetLiterals()[name]; ok { return &service.GetDataResponse{ Data: &service.GetDataResponse_Literal{ Literal: literal, @@ -418,15 +418,15 @@ func (s Service) GetDataFromTaskExecution(ctx context.Context, taskExecID *core. } if ioType == common.ArtifactTypeI { - lm = resp.FullInputs + lm = resp.GetFullInputs() } else if ioType == common.ArtifactTypeO { - lm = resp.FullOutputs + lm = resp.GetFullOutputs() } else { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "deck type cannot be specified with a retry attempt, just use the node instead") } if name != "" { - if literal, ok := lm.Literals[name]; ok { + if literal, ok := lm.GetLiterals()[name]; ok { return &service.GetDataResponse{ Data: &service.GetDataResponse_Literal{ Literal: literal, diff --git a/flyteadmin/go.mod b/flyteadmin/go.mod index 5c008a46eb..82e2189f34 100644 --- a/flyteadmin/go.mod +++ b/flyteadmin/go.mod @@ -21,7 +21,7 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/go-gormigrate/gormigrate/v2 v2.1.1 github.com/golang-jwt/jwt v3.2.2+incompatible - github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang-jwt/jwt/v4 v4.5.1 github.com/golang/glog v1.2.0 github.com/golang/protobuf v1.5.3 github.com/google/uuid v1.6.0 @@ -48,7 +48,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - github.com/wI2L/jsondiff v0.5.0 + github.com/wI2L/jsondiff v0.6.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 go.opentelemetry.io/otel v1.24.0 golang.org/x/net v0.27.0 @@ -167,6 +167,7 @@ require ( github.com/prometheus/common v0.53.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect github.com/sendgrid/rest v2.6.9+incompatible // indirect github.com/shamaton/msgpack/v2 v2.2.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ -176,10 +177,11 @@ require ( github.com/spf13/viper v1.11.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.2.0 // indirect - github.com/tidwall/gjson v1.17.0 // indirect + github.com/tidwall/gjson v1.17.1 // indirect github.com/tidwall/match v1.1.1 // indirect - github.com/tidwall/pretty v1.2.0 // indirect + github.com/tidwall/pretty v1.2.1 // indirect github.com/tidwall/sjson v1.2.5 // indirect + gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect diff --git a/flyteadmin/go.sum b/flyteadmin/go.sum index ec5e0cdc1c..afd775c3ba 100644 --- a/flyteadmin/go.sum +++ b/flyteadmin/go.sum @@ -611,8 +611,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= 
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= @@ -1159,6 +1159,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/santhosh-tekuri/jsonschema/v2 v2.1.0/go.mod h1:yzJzKUGV4RbWqWIBBP4wSOBqavX5saE02yirLS0OTyg= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= @@ -1271,8 +1272,8 @@ github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJH github.com/tidwall/gjson v1.6.8/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= github.com/tidwall/gjson v1.7.1/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= -github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -1280,8 +1281,9 @@ github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= github.com/tidwall/sjson v1.1.5/go.mod h1:VuJzsZnTowhSxWdOgsAnb886i4AjEyTkk7tNtsL7EYE= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= @@ -1302,8 +1304,8 @@ github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IA github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool 
v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/wI2L/jsondiff v0.5.0 h1:RRMTi/mH+R2aXcPe1VYyvGINJqQfC3R+KSEakuU1Ikw= -github.com/wI2L/jsondiff v0.5.0/go.mod h1:qqG6hnK0Lsrz2BpIVCxWiK9ItsBCpIZQiv0izJjOZ9s= +github.com/wI2L/jsondiff v0.6.0 h1:zrsH3FbfVa3JO9llxrcDy/XLkYPLgoMX6Mz3T2PP2AI= +github.com/wI2L/jsondiff v0.6.0/go.mod h1:D6aQ5gKgPF9g17j+E9N7aasmU1O+XvfmWm1y8UMmNpw= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= @@ -1319,6 +1321,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7 h1:BAkxmYRc1ZPl6Gap4HWqwPT8yLZMrgaAwx12Ft408sg= +gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7/go.mod h1:X40Z1OU8o1oiXWzBmkuYOaruzYGv60l0AxGiB0E9keI= go.elastic.co/apm v1.8.0/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0= go.elastic.co/apm/module/apmhttp v1.8.0/go.mod h1:9LPFlEON51/lRbnWDfqAWErihIiAFDUMfMV27YjoWQ8= go.elastic.co/apm/module/apmot v1.8.0/go.mod h1:Q5Xzabte8G/fkvDjr1jlDuOSUt9hkVWNZEHh6ZNaTjI= diff --git a/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go b/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go index 7aaab0bb60..e21d9b0b5a 100644 --- a/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go +++ b/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go @@ -57,20 +57,20 @@ func (p *Publisher) Publish(ctx context.Context, notificationType string, msg pr switch msgType := msg.(type) { case *admin.WorkflowExecutionEventRequest: - e := msgType.Event - executionID = e.ExecutionId.String() - phase = e.Phase.String() - eventTime = e.OccurredAt.AsTime() + e := msgType.GetEvent() + executionID = e.GetExecutionId().String() + phase = e.GetPhase().String() + eventTime = e.GetOccurredAt().AsTime() case *admin.TaskExecutionEventRequest: - e := msgType.Event - executionID = e.TaskId.String() - phase = e.Phase.String() - eventTime = e.OccurredAt.AsTime() + e := msgType.GetEvent() + executionID = e.GetTaskId().String() + phase = e.GetPhase().String() + eventTime = e.GetOccurredAt().AsTime() case *admin.NodeExecutionEventRequest: - e := msgType.Event - executionID = msgType.Event.Id.String() - phase = e.Phase.String() - eventTime = e.OccurredAt.AsTime() + e := msgType.GetEvent() + executionID = msgType.GetEvent().GetId().String() + phase = e.GetPhase().String() + eventTime = e.GetOccurredAt().AsTime() default: return fmt.Errorf("unsupported event types [%+v]", reflect.TypeOf(msg)) } @@ -128,13 +128,13 @@ func (c *CloudEventWrappedPublisher) TransformWorkflowExecutionEvent(ctx context if rawEvent == nil { return nil, fmt.Errorf("nothing to publish, WorkflowExecution event is nil") } - if rawEvent.ExecutionId == nil { + if rawEvent.GetExecutionId() == nil { logger.Warningf(ctx, "nil execution id in event [%+v]", rawEvent) return nil, fmt.Errorf("nil execution id in event [%+v]", rawEvent) } // For now, don't append any additional information unless 
succeeded - if rawEvent.Phase != core.WorkflowExecution_SUCCEEDED { + if rawEvent.GetPhase() != core.WorkflowExecution_SUCCEEDED { return &event.CloudEventWorkflowExecution{ RawEvent: rawEvent, }, nil @@ -142,35 +142,35 @@ func (c *CloudEventWrappedPublisher) TransformWorkflowExecutionEvent(ctx context // TODO: Make this one call to the DB instead of two. executionModel, err := c.db.ExecutionRepo().Get(ctx, repositoryInterfaces.Identifier{ - Project: rawEvent.ExecutionId.Project, - Domain: rawEvent.ExecutionId.Domain, - Name: rawEvent.ExecutionId.Name, + Project: rawEvent.GetExecutionId().GetProject(), + Domain: rawEvent.GetExecutionId().GetDomain(), + Name: rawEvent.GetExecutionId().GetName(), }) if err != nil { - logger.Warningf(ctx, "couldn't find execution [%+v] for cloud event processing", rawEvent.ExecutionId) + logger.Warningf(ctx, "couldn't find execution [%+v] for cloud event processing", rawEvent.GetExecutionId()) return nil, err } ex, err := transformers.FromExecutionModel(ctx, executionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { - logger.Warningf(ctx, "couldn't transform execution [%+v] for cloud event processing", rawEvent.ExecutionId) + logger.Warningf(ctx, "couldn't transform execution [%+v] for cloud event processing", rawEvent.GetExecutionId()) return nil, err } - if ex.Closure.WorkflowId == nil { + if ex.GetClosure().GetWorkflowId() == nil { logger.Warningf(ctx, "workflow id is nil for execution [%+v]", ex) return nil, fmt.Errorf("workflow id is nil for execution [%+v]", ex) } workflowModel, err := c.db.WorkflowRepo().Get(ctx, repositoryInterfaces.Identifier{ - Project: ex.Closure.WorkflowId.Project, - Domain: ex.Closure.WorkflowId.Domain, - Name: ex.Closure.WorkflowId.Name, - Version: ex.Closure.WorkflowId.Version, + Project: ex.GetClosure().GetWorkflowId().GetProject(), + Domain: ex.GetClosure().GetWorkflowId().GetDomain(), + Name: ex.GetClosure().GetWorkflowId().GetName(), + Version: ex.GetClosure().GetWorkflowId().GetVersion(), }) if err != nil { - logger.Warningf(ctx, "couldn't find workflow [%+v] for cloud event processing", ex.Closure.WorkflowId) + logger.Warningf(ctx, "couldn't find workflow [%+v] for cloud event processing", ex.GetClosure().GetWorkflowId()) return nil, err } var workflowInterface core.TypedInterface - if workflowModel.TypedInterface != nil && len(workflowModel.TypedInterface) > 0 { + if len(workflowModel.TypedInterface) > 0 { err = proto.Unmarshal(workflowModel.TypedInterface, &workflowInterface) if err != nil { return nil, fmt.Errorf( @@ -191,15 +191,15 @@ func (c *CloudEventWrappedPublisher) TransformWorkflowExecutionEvent(ctx context OutputInterface: &workflowInterface, ArtifactIds: spec.GetMetadata().GetArtifactIds(), ReferenceExecution: spec.GetMetadata().GetReferenceExecution(), - Principal: spec.GetMetadata().Principal, - LaunchPlanId: spec.LaunchPlan, + Principal: spec.GetMetadata().GetPrincipal(), + LaunchPlanId: spec.GetLaunchPlan(), }, nil } func getNodeExecutionContext(ctx context.Context, identifier *core.NodeExecutionIdentifier) context.Context { - ctx = contextutils.WithProjectDomain(ctx, identifier.ExecutionId.Project, identifier.ExecutionId.Domain) - ctx = contextutils.WithExecutionID(ctx, identifier.ExecutionId.Name) - return contextutils.WithNodeID(ctx, identifier.NodeId) + ctx = contextutils.WithProjectDomain(ctx, identifier.GetExecutionId().GetProject(), identifier.GetExecutionId().GetDomain()) + ctx = contextutils.WithExecutionID(ctx, identifier.GetExecutionId().GetName()) + return 
contextutils.WithNodeID(ctx, identifier.GetNodeId()) } // This is a rough copy of the ListTaskExecutions function in TaskExecutionManager. It can be deprecated once we move the processing out of Admin itself. @@ -230,7 +230,7 @@ func (c *CloudEventWrappedPublisher) getLatestTaskExecutions(ctx context.Context if err != nil { return nil, err } - if output.TaskExecutions == nil || len(output.TaskExecutions) == 0 { + if len(output.TaskExecutions) == 0 { logger.Debugf(ctx, "no task executions found for node exec id [%+v]", nodeExecutionID) return nil, nil } @@ -245,16 +245,16 @@ func (c *CloudEventWrappedPublisher) getLatestTaskExecutions(ctx context.Context } func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Context, rawEvent *event.NodeExecutionEvent) (*event.CloudEventNodeExecution, error) { - if rawEvent == nil || rawEvent.Id == nil { + if rawEvent == nil || rawEvent.GetId() == nil { return nil, fmt.Errorf("nothing to publish, NodeExecution event or ID is nil") } // Skip nodes unless they're succeeded and not start nodes - if rawEvent.Phase != core.NodeExecution_SUCCEEDED { + if rawEvent.GetPhase() != core.NodeExecution_SUCCEEDED { return &event.CloudEventNodeExecution{ RawEvent: rawEvent, }, nil - } else if rawEvent.Id.NodeId == "start-node" { + } else if rawEvent.GetId().GetNodeId() == "start-node" { return &event.CloudEventNodeExecution{ RawEvent: rawEvent, }, nil @@ -263,12 +263,12 @@ func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con // This gets the parent workflow execution metadata executionModel, err := c.db.ExecutionRepo().Get(ctx, repositoryInterfaces.Identifier{ - Project: rawEvent.Id.ExecutionId.Project, - Domain: rawEvent.Id.ExecutionId.Domain, - Name: rawEvent.Id.ExecutionId.Name, + Project: rawEvent.GetId().GetExecutionId().GetProject(), + Domain: rawEvent.GetId().GetExecutionId().GetDomain(), + Name: rawEvent.GetId().GetExecutionId().GetName(), }) if err != nil { - logger.Infof(ctx, "couldn't find execution [%+v] for cloud event processing", rawEvent.Id.ExecutionId) + logger.Infof(ctx, "couldn't find execution [%+v] for cloud event processing", rawEvent.GetId().GetExecutionId()) return nil, err } @@ -283,22 +283,22 @@ func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con var taskExecID *core.TaskExecutionIdentifier var typedInterface *core.TypedInterface - lte, err := c.getLatestTaskExecutions(ctx, rawEvent.Id) + lte, err := c.getLatestTaskExecutions(ctx, rawEvent.GetId()) if err != nil { - logger.Errorf(ctx, "failed to get latest task execution for node exec id [%+v] with err: %v", rawEvent.Id, err) + logger.Errorf(ctx, "failed to get latest task execution for node exec id [%+v] with err: %v", rawEvent.GetId(), err) return nil, err } if lte != nil { taskModel, err := c.db.TaskRepo().Get(ctx, repositoryInterfaces.Identifier{ - Project: lte.Id.TaskId.Project, - Domain: lte.Id.TaskId.Domain, - Name: lte.Id.TaskId.Name, - Version: lte.Id.TaskId.Version, + Project: lte.GetId().GetTaskId().GetProject(), + Domain: lte.GetId().GetTaskId().GetDomain(), + Name: lte.GetId().GetTaskId().GetName(), + Version: lte.GetId().GetTaskId().GetVersion(), }) if err != nil { // TODO: metric this // metric - logger.Debugf(ctx, "Failed to get task with task id [%+v] with err %v", lte.Id.TaskId, err) + logger.Debugf(ctx, "Failed to get task with task id [%+v] with err %v", lte.GetId().GetTaskId(), err) return nil, err } task, err := transformers.FromTaskModel(taskModel) @@ -306,8 +306,8 @@ func (c 
*CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con logger.Debugf(ctx, "Failed to transform task model with err %v", err) return nil, err } - typedInterface = task.Closure.CompiledTask.Template.Interface - taskExecID = lte.Id + typedInterface = task.GetClosure().GetCompiledTask().GetTemplate().GetInterface() + taskExecID = lte.GetId() } return &event.CloudEventNodeExecution{ @@ -315,8 +315,8 @@ func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con TaskExecId: taskExecID, OutputInterface: typedInterface, ArtifactIds: spec.GetMetadata().GetArtifactIds(), - Principal: spec.GetMetadata().Principal, - LaunchPlanId: spec.LaunchPlan, + Principal: spec.GetMetadata().GetPrincipal(), + LaunchPlanId: spec.GetLaunchPlan(), }, nil } @@ -348,14 +348,14 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy switch msgType := msg.(type) { case *admin.WorkflowExecutionEventRequest: topic = "cloudevents.WorkflowExecution" - e := msgType.Event - executionID = e.ExecutionId.String() - phase = e.Phase.String() - eventTime = e.OccurredAt.AsTime() + e := msgType.GetEvent() + executionID = e.GetExecutionId().String() + phase = e.GetPhase().String() + eventTime = e.GetOccurredAt().AsTime() dummyNodeExecutionID := &core.NodeExecutionIdentifier{ NodeId: "end-node", - ExecutionId: e.ExecutionId, + ExecutionId: e.GetExecutionId(), } // This forms part of the key in the Artifact store, // but it should probably be entirely derived by that service instead. @@ -369,17 +369,17 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy case *admin.TaskExecutionEventRequest: topic = "cloudevents.TaskExecution" - e := msgType.Event - executionID = e.TaskId.String() - phase = e.Phase.String() - eventTime = e.OccurredAt.AsTime() + e := msgType.GetEvent() + executionID = e.GetTaskId().String() + phase = e.GetPhase().String() + eventTime = e.GetOccurredAt().AsTime() eventID = fmt.Sprintf("%v.%v", executionID, phase) - if e.ParentNodeExecutionId == nil { + if e.GetParentNodeExecutionId() == nil { return fmt.Errorf("parent node execution id is nil for task execution [%+v]", e) } - eventSource = common.FlyteURLKeyFromNodeExecutionIDRetry(e.ParentNodeExecutionId, - int(e.RetryAttempt)) + eventSource = common.FlyteURLKeyFromNodeExecutionIDRetry(e.GetParentNodeExecutionId(), + int(e.GetRetryAttempt())) finalMsg, err = c.TransformTaskExecutionEvent(ctx, e) if err != nil { logger.Errorf(ctx, "Failed to transform task execution event with error: %v", err) @@ -387,12 +387,12 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy } case *admin.NodeExecutionEventRequest: topic = "cloudevents.NodeExecution" - e := msgType.Event - executionID = msgType.Event.Id.String() - phase = e.Phase.String() - eventTime = e.OccurredAt.AsTime() + e := msgType.GetEvent() + executionID = msgType.GetEvent().GetId().String() + phase = e.GetPhase().String() + eventTime = e.GetOccurredAt().AsTime() eventID = fmt.Sprintf("%v.%v", executionID, phase) - eventSource = common.FlyteURLKeyFromNodeExecutionID(msgType.Event.Id) + eventSource = common.FlyteURLKeyFromNodeExecutionID(msgType.GetEvent().GetId()) finalMsg, err = c.TransformNodeExecutionEvent(ctx, e) if err != nil { logger.Errorf(ctx, "Failed to transform node execution event with error: %v", err) @@ -400,7 +400,7 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy } case *event.CloudEventExecutionStart: topic = "cloudevents.ExecutionStart" - 
executionID = msgType.ExecutionId.String() + executionID = msgType.GetExecutionId().String() eventID = fmt.Sprintf("%v", executionID) eventTime = time.Now() // CloudEventExecutionStart don't have a nested event diff --git a/flyteadmin/pkg/async/notifications/email.go b/flyteadmin/pkg/async/notifications/email.go index a89210cead..e23806cc12 100644 --- a/flyteadmin/pkg/async/notifications/email.go +++ b/flyteadmin/pkg/async/notifications/email.go @@ -30,58 +30,58 @@ const launchPlanVersion = "launch_plan.version" const replaceAllInstances = -1 func getProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Id.Project + return exec.GetId().GetProject() } func getDomain(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Id.Domain + return exec.GetId().GetDomain() } func getName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Id.Name + return exec.GetId().GetName() } func getPhase(request *admin.WorkflowExecutionEventRequest, _ *admin.Execution) string { - return strings.ToLower(request.Event.Phase.String()) + return strings.ToLower(request.GetEvent().GetPhase().String()) } func getError(request *admin.WorkflowExecutionEventRequest, _ *admin.Execution) string { - if request.Event.GetError() != nil { - return fmt.Sprintf(executionError, request.Event.GetError().Message) + if request.GetEvent().GetError() != nil { + return fmt.Sprintf(executionError, request.GetEvent().GetError().GetMessage()) } return "" } func getWorkflowProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Closure.WorkflowId.Project + return exec.GetClosure().GetWorkflowId().GetProject() } func getWorkflowDomain(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Closure.WorkflowId.Domain + return exec.GetClosure().GetWorkflowId().GetDomain() } func getWorkflowName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Closure.WorkflowId.Name + return exec.GetClosure().GetWorkflowId().GetName() } func getWorkflowVersion(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Closure.WorkflowId.Version + return exec.GetClosure().GetWorkflowId().GetVersion() } func getLaunchPlanProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Spec.LaunchPlan.Project + return exec.GetSpec().GetLaunchPlan().GetProject() } func getLaunchPlanDomain(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Spec.LaunchPlan.Domain + return exec.GetSpec().GetLaunchPlan().GetDomain() } func getLaunchPlanName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Spec.LaunchPlan.Name + return exec.GetSpec().GetLaunchPlan().GetName() } func getLaunchPlanVersion(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { - return exec.Spec.LaunchPlan.Version + return exec.GetSpec().GetLaunchPlan().GetVersion() } var getTemplateValueFuncs = map[string]GetTemplateValue{ diff --git a/flyteadmin/pkg/async/notifications/email_test.go b/flyteadmin/pkg/async/notifications/email_test.go index 35f351a45d..0d045603e8 100644 --- a/flyteadmin/pkg/async/notifications/email_test.go +++ b/flyteadmin/pkg/async/notifications/email_test.go @@ -155,8 +155,8 @@ func TestToEmailMessageFromWorkflowExecutionEvent(t *testing.T) { SubjectLine: `Notice: Execution "e124" has succeeded in "prod".`, Body: `Execution "e124" has 
succeeded in "prod". View details at https://example.com/executions/proj/prod/e124.`, } - assert.True(t, emailMessage.Body == expected.Body) - assert.True(t, emailMessage.SubjectLine == expected.SubjectLine) - assert.True(t, emailMessage.SenderEmail == expected.SenderEmail) - assert.True(t, len(emailMessage.RecipientsEmail) == len(expected.RecipientsEmail)) + assert.True(t, emailMessage.GetBody() == expected.GetBody()) + assert.True(t, emailMessage.GetSubjectLine() == expected.GetSubjectLine()) + assert.True(t, emailMessage.GetSenderEmail() == expected.GetSenderEmail()) + assert.True(t, len(emailMessage.GetRecipientsEmail()) == len(expected.GetRecipientsEmail())) } diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go b/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go index 712bd7080d..d9891d616b 100644 --- a/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go +++ b/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go @@ -23,7 +23,7 @@ type AwsEmailer struct { func FlyteEmailToSesEmailInput(email *admin.EmailMessage) ses.SendEmailInput { var toAddress []*string - for _, toEmail := range email.RecipientsEmail { + for _, toEmail := range email.GetRecipientsEmail() { // SES email input takes an array of pointers to strings so we have to create a new one for each email //nolint:unconvert e := string(toEmail) @@ -61,7 +61,7 @@ func (e *AwsEmailer) SendEmail(ctx context.Context, email *admin.EmailMessage) e e.systemMetrics.SendError.Inc() return errors.NewFlyteAdminErrorf(codes.Internal, "errors were seen while sending emails") } - logger.Debugf(ctx, "Sent email to %s sub: %s", email.RecipientsEmail, email.SubjectLine) + logger.Debugf(ctx, "Sent email to %s sub: %s", email.GetRecipientsEmail(), email.GetSubjectLine()) e.systemMetrics.SendSuccess.Inc() return nil } diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go b/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go index 01a2a06273..1caf1ce3a1 100644 --- a/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go @@ -46,18 +46,18 @@ func TestAwsEmailer_SendEmail(t *testing.T) { sendEmailValidationFunc := func(input *ses.SendEmailInput) (*ses.SendEmailOutput, error) { assert.Equal(t, *input.Source, expectedSenderEmail) - assert.Equal(t, *input.Message.Body.Html.Data, emailNotification.Body) - assert.Equal(t, *input.Message.Subject.Data, emailNotification.SubjectLine) + assert.Equal(t, *input.Message.Body.Html.Data, emailNotification.GetBody()) + assert.Equal(t, *input.Message.Subject.Data, emailNotification.GetSubjectLine()) for _, toEmail := range input.Destination.ToAddresses { var foundEmail = false - for _, verifyToEmail := range emailNotification.RecipientsEmail { + for _, verifyToEmail := range emailNotification.GetRecipientsEmail() { if *toEmail == verifyToEmail { foundEmail = true } } assert.Truef(t, foundEmail, "To Email address [%s] wasn't apart of original inputs.", *toEmail) } - assert.Equal(t, len(input.Destination.ToAddresses), len(emailNotification.RecipientsEmail)) + assert.Equal(t, len(input.Destination.ToAddresses), len(emailNotification.GetRecipientsEmail())) return &ses.SendEmailOutput{}, nil } mockAwsEmail.SetSendEmailFunc(sendEmailValidationFunc) @@ -80,8 +80,8 @@ func TestFlyteEmailToSesEmailInput(t *testing.T) { } sesEmailInput := FlyteEmailToSesEmailInput(emailNotification) - assert.Equal(t, 
*sesEmailInput.Destination.ToAddresses[0], emailNotification.RecipientsEmail[0]) - assert.Equal(t, *sesEmailInput.Destination.ToAddresses[1], emailNotification.RecipientsEmail[1]) + assert.Equal(t, *sesEmailInput.Destination.ToAddresses[0], emailNotification.GetRecipientsEmail()[0]) + assert.Equal(t, *sesEmailInput.Destination.ToAddresses[1], emailNotification.GetRecipientsEmail()[1]) assert.Equal(t, *sesEmailInput.Message.Subject.Data, "Notice: Execution \"name\" has succeeded in \"domain\".") } diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go index e566fdd740..611cebceb2 100644 --- a/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go @@ -31,10 +31,10 @@ func TestProcessor_StartProcessing(t *testing.T) { testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testSubscriberMessage) sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error { - assert.Equal(t, email.Body, testEmail.Body) - assert.Equal(t, email.RecipientsEmail, testEmail.RecipientsEmail) - assert.Equal(t, email.SubjectLine, testEmail.SubjectLine) - assert.Equal(t, email.SenderEmail, testEmail.SenderEmail) + assert.Equal(t, email.GetBody(), testEmail.GetBody()) + assert.Equal(t, email.GetRecipientsEmail(), testEmail.GetRecipientsEmail()) + assert.Equal(t, email.GetSubjectLine(), testEmail.GetSubjectLine()) + assert.Equal(t, email.GetSenderEmail(), testEmail.GetSenderEmail()) return nil } mockEmailer.SetSendEmailFunc(sendEmailValidationFunc) diff --git a/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go index d48efeeee9..6d0f7d87fe 100644 --- a/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go @@ -35,10 +35,10 @@ func TestGcpProcessor_StartProcessing(t *testing.T) { testGcpProcessor := NewGcpProcessor(&testGcpSubscriber, &mockGcpEmailer, promutils.NewTestScope()) sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error { - assert.Equal(t, email.Body, testEmail.Body) - assert.Equal(t, email.RecipientsEmail, testEmail.RecipientsEmail) - assert.Equal(t, email.SubjectLine, testEmail.SubjectLine) - assert.Equal(t, email.SenderEmail, testEmail.SenderEmail) + assert.Equal(t, email.GetBody(), testEmail.GetBody()) + assert.Equal(t, email.GetRecipientsEmail(), testEmail.GetRecipientsEmail()) + assert.Equal(t, email.GetSubjectLine(), testEmail.GetSubjectLine()) + assert.Equal(t, email.GetSenderEmail(), testEmail.GetSenderEmail()) return nil } mockGcpEmailer.SetSendEmailFunc(sendEmailValidationFunc) diff --git a/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go b/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go index 03dfa063ea..adae0d92fc 100644 --- a/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go +++ b/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go @@ -16,7 +16,7 @@ type NoopEmail struct{} func (n *NoopEmail) SendEmail(ctx context.Context, email *admin.EmailMessage) error { logger.Debugf(ctx, "received noop SendEmail request with subject [%s] and recipient [%s]", - email.SubjectLine, strings.Join(email.RecipientsEmail, ",")) + email.GetSubjectLine(), 
strings.Join(email.GetRecipientsEmail(), ",")) return nil } diff --git a/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go index 83594284a9..17251ca723 100644 --- a/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go @@ -20,10 +20,10 @@ func TestSandboxProcessor_StartProcessingSuccess(t *testing.T) { testSandboxProcessor := NewSandboxProcessor(msgChan, &mockSandboxEmailer) sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error { - assert.Equal(t, testEmail.Body, email.Body) - assert.Equal(t, testEmail.RecipientsEmail, email.RecipientsEmail) - assert.Equal(t, testEmail.SubjectLine, email.SubjectLine) - assert.Equal(t, testEmail.SenderEmail, email.SenderEmail) + assert.Equal(t, testEmail.GetBody(), email.GetBody()) + assert.Equal(t, testEmail.GetRecipientsEmail(), email.GetRecipientsEmail()) + assert.Equal(t, testEmail.GetSubjectLine(), email.GetSubjectLine()) + assert.Equal(t, testEmail.GetSenderEmail(), email.GetSenderEmail()) return nil } diff --git a/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go b/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go index c8386bd41e..a325cbee75 100644 --- a/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go +++ b/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go @@ -34,15 +34,15 @@ func getSendgridEmail(adminEmail *admin.EmailMessage) *mail.SGMailV3 { m := mail.NewV3Mail() // This from email address is really here as a formality. For sendgrid specifically, the sender email is determined // from the api key that's used, not what you send along here. - from := mail.NewEmail("Flyte Notifications", adminEmail.SenderEmail) - content := mail.NewContent("text/html", adminEmail.Body) + from := mail.NewEmail("Flyte Notifications", adminEmail.GetSenderEmail()) + content := mail.NewContent("text/html", adminEmail.GetBody()) m.SetFrom(from) m.AddContent(content) personalization := mail.NewPersonalization() - emailAddresses := getEmailAddresses(adminEmail.RecipientsEmail) + emailAddresses := getEmailAddresses(adminEmail.GetRecipientsEmail()) personalization.AddTos(emailAddresses...) 
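
Note on the field-access-to-getter changes in the notification emailers above: protoc-gen-go getters are nil-receiver safe, which is why call sites such as email.GetRecipientsEmail() no longer need explicit nil guards. A minimal, self-contained Go sketch of the idea (the EmailMessage type below is a hand-written stand-in for the generated admin.EmailMessage, not the generated code itself):

package main

import (
	"fmt"
	"strings"
)

// EmailMessage stands in for the generated admin.EmailMessage type.
type EmailMessage struct {
	SubjectLine     string
	RecipientsEmail []string
}

// Generated-style getter: a nil receiver yields the zero value instead of panicking.
func (m *EmailMessage) GetSubjectLine() string {
	if m != nil {
		return m.SubjectLine
	}
	return ""
}

func (m *EmailMessage) GetRecipientsEmail() []string {
	if m != nil {
		return m.RecipientsEmail
	}
	return nil
}

func main() {
	var msg *EmailMessage // nil on purpose
	// Direct field access (msg.RecipientsEmail) would panic here; the getters do not,
	// and strings.Join on a nil slice simply returns "".
	fmt.Printf("subject=%q to=%q\n", msg.GetSubjectLine(), strings.Join(msg.GetRecipientsEmail(), ","))
}

The same reasoning applies to the chained getters elsewhere in this diff (for example ex.GetClosure().GetWorkflowId().GetName()): each link tolerates a nil receiver, so the chain degrades to a zero value rather than a nil-pointer panic.
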
- personalization.Subject = adminEmail.SubjectLine + personalization.Subject = adminEmail.GetSubjectLine() m.AddPersonalizations(personalization) return m diff --git a/flyteadmin/pkg/async/notifications/implementations/smtp_emailer.go b/flyteadmin/pkg/async/notifications/implementations/smtp_emailer.go index 5a705bc0c1..29a79b1589 100644 --- a/flyteadmin/pkg/async/notifications/implementations/smtp_emailer.go +++ b/flyteadmin/pkg/async/notifications/implementations/smtp_emailer.go @@ -72,11 +72,11 @@ func (s *SMTPEmailer) SendEmail(ctx context.Context, email *admin.EmailMessage) s.smtpClient = smtpClient } - if err := s.smtpClient.Mail(email.SenderEmail); err != nil { + if err := s.smtpClient.Mail(email.GetSenderEmail()); err != nil { return s.emailError(ctx, fmt.Sprintf("Error creating email instance: %s", err)) } - for _, recipient := range email.RecipientsEmail { + for _, recipient := range email.GetRecipientsEmail() { if err := s.smtpClient.Rcpt(recipient); err != nil { return s.emailError(ctx, fmt.Sprintf("Error adding email recipient: %s", err)) } @@ -113,8 +113,8 @@ func (s *SMTPEmailer) emailError(ctx context.Context, error string) error { func createMailBody(emailSender string, email *admin.EmailMessage) string { headerMap := make(map[string]string) headerMap["From"] = emailSender - headerMap["To"] = strings.Join(email.RecipientsEmail, ",") - headerMap["Subject"] = email.SubjectLine + headerMap["To"] = strings.Join(email.GetRecipientsEmail(), ",") + headerMap["Subject"] = email.GetSubjectLine() headerMap["Content-Type"] = "text/html; charset=\"UTF-8\"" mailMessage := "" @@ -123,7 +123,7 @@ func createMailBody(emailSender string, email *admin.EmailMessage) string { mailMessage += fmt.Sprintf("%s: %s\r\n", k, v) } - mailMessage += "\r\n" + email.Body + mailMessage += "\r\n" + email.GetBody() return mailMessage } @@ -140,7 +140,7 @@ func NewSMTPEmailer(ctx context.Context, config runtimeInterfaces.NotificationsC auth := smtp.PlainAuth("", emailConf.SMTPUsername, smtpPassword, emailConf.SMTPServer) - // #nosec G402 + // #nosec G402: Allow skipping TLS verification in specific environments. tlsConfiguration = &tls.Config{ InsecureSkipVerify: emailConf.SMTPSkipTLSVerify, ServerName: emailConf.SMTPServer, diff --git a/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go b/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go index 9c3cb166b5..d4249e9122 100644 --- a/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go +++ b/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go @@ -78,7 +78,7 @@ func getScheduleName(scheduleNamePrefix string, identifier *core.Identifier) str func getScheduleDescription(identifier *core.Identifier) string { return fmt.Sprintf(scheduleDescriptionFormat, - identifier.Project, identifier.Domain, identifier.Name) + identifier.GetProject(), identifier.GetDomain(), identifier.GetName()) } func getScheduleExpression(schedule *admin.Schedule) (string, error) { @@ -88,11 +88,11 @@ func getScheduleExpression(schedule *admin.Schedule) (string, error) { if schedule.GetRate() != nil { // AWS uses pluralization for units of values not equal to 1. 
// See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html - unit := strings.ToLower(schedule.GetRate().Unit.String()) - if schedule.GetRate().Value != 1 { + unit := strings.ToLower(schedule.GetRate().GetUnit().String()) + if schedule.GetRate().GetValue() != 1 { unit = fmt.Sprintf("%ss", unit) } - return fmt.Sprintf(rateExpression, schedule.GetRate().Value, unit), nil + return fmt.Sprintf(rateExpression, schedule.GetRate().GetValue(), unit), nil } logger.Debugf(context.Background(), "scheduler encountered invalid schedule expression: %s", schedule.String()) return "", errors.NewFlyteAdminErrorf(codes.InvalidArgument, "unrecognized schedule expression") @@ -176,9 +176,9 @@ func (s *cloudWatchScheduler) CreateScheduleInput(ctx context.Context, appConfig payload, err := SerializeScheduleWorkflowPayload( schedule.GetKickoffTimeInputArg(), &admin.NamedEntityIdentifier{ - Project: identifier.Project, - Domain: identifier.Domain, - Name: identifier.Name, + Project: identifier.GetProject(), + Domain: identifier.GetDomain(), + Name: identifier.GetName(), }) if err != nil { logger.Errorf(ctx, "failed to serialize schedule workflow payload for launch plan: %v with err: %v", diff --git a/flyteadmin/pkg/async/schedule/aws/shared.go b/flyteadmin/pkg/async/schedule/aws/shared.go index 3868e05799..e21b25ed5b 100644 --- a/flyteadmin/pkg/async/schedule/aws/shared.go +++ b/flyteadmin/pkg/async/schedule/aws/shared.go @@ -12,7 +12,7 @@ import ( func hashIdentifier(identifier *core.Identifier) uint64 { h := fnv.New64() _, err := h.Write([]byte(fmt.Sprintf(scheduleNameInputsFormat, - identifier.Project, identifier.Domain, identifier.Name))) + identifier.GetProject(), identifier.GetDomain(), identifier.GetName()))) if err != nil { // This shouldn't occur. 
logger.Errorf(context.Background(), diff --git a/flyteadmin/pkg/async/schedule/aws/workflow_executor.go b/flyteadmin/pkg/async/schedule/aws/workflow_executor.go index c4a5d75d14..d9a21c9026 100644 --- a/flyteadmin/pkg/async/schedule/aws/workflow_executor.go +++ b/flyteadmin/pkg/async/schedule/aws/workflow_executor.go @@ -63,18 +63,18 @@ var doNotconsumeBase64 = false func (e *workflowExecutor) resolveKickoffTimeArg( request ScheduledWorkflowExecutionRequest, launchPlan *admin.LaunchPlan, executionRequest *admin.ExecutionCreateRequest) error { - if request.KickoffTimeArg == "" || launchPlan.Closure.ExpectedInputs == nil { + if request.KickoffTimeArg == "" || launchPlan.GetClosure().GetExpectedInputs() == nil { logger.Debugf(context.Background(), "No kickoff time to resolve for scheduled workflow execution: [%s/%s/%s]", - executionRequest.Project, executionRequest.Domain, executionRequest.Name) + executionRequest.GetProject(), executionRequest.GetDomain(), executionRequest.GetName()) return nil } - for name := range launchPlan.Closure.ExpectedInputs.Parameters { + for name := range launchPlan.GetClosure().GetExpectedInputs().GetParameters() { if name == request.KickoffTimeArg { ts, err := ptypes.TimestampProto(request.KickoffTime) if err != nil { logger.Warningf(context.Background(), "failed to serialize kickoff time %+v to timestamp proto for scheduled workflow execution with "+ - "launchPlan [%+v]", request.KickoffTime, launchPlan.Id) + "launchPlan [%+v]", request.KickoffTime, launchPlan.GetId()) return errors.NewFlyteAdminErrorf( codes.Internal, "could not serialize kickoff time %+v to timestamp proto", request.KickoffTime) } @@ -96,7 +96,7 @@ func (e *workflowExecutor) resolveKickoffTimeArg( } logger.Warningf(context.Background(), "expected kickoff time arg with launch plan [%+v] but did not find any matching expected input to resolve", - launchPlan.Id) + launchPlan.GetId()) return nil } @@ -112,23 +112,24 @@ func (e *workflowExecutor) getActiveLaunchPlanVersion(launchPlanIdentifier *admi e.metrics.NoActiveLaunchPlanVersionsFound.Inc() return &admin.LaunchPlan{}, err } - if len(launchPlans.LaunchPlans) != 1 { + if len(launchPlans.GetLaunchPlans()) != 1 { e.metrics.GreaterThan1LaunchPlanVersionsFound.Inc() logger.Warningf(context.Background(), "failed to get exactly one active launch plan for identifier: %+v", launchPlanIdentifier) return &admin.LaunchPlan{}, errors.NewFlyteAdminErrorf(codes.Internal, "failed to get exactly one active launch plan for identifier: %+v", launchPlanIdentifier) } - return launchPlans.LaunchPlans[0], nil + return launchPlans.GetLaunchPlans()[0], nil } func generateExecutionName(launchPlan *admin.LaunchPlan, kickoffTime time.Time) string { hashedIdentifier := hashIdentifier(&core.Identifier{ - Project: launchPlan.Id.Project, - Domain: launchPlan.Id.Domain, - Name: launchPlan.Id.Name, + Project: launchPlan.GetId().GetProject(), + Domain: launchPlan.GetId().GetDomain(), + Name: launchPlan.GetId().GetName(), }) - randomSeed := kickoffTime.UnixNano() + int64(hashedIdentifier) + randomSeed := kickoffTime.UnixNano() + int64(hashedIdentifier) // #nosec G115 + return common.GetExecutionName(randomSeed) } @@ -137,7 +138,7 @@ func (e *workflowExecutor) formulateExecutionCreateRequest( // Deterministically assign a name based on the schedule kickoff time/launch plan definition. 
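
Note on the new // #nosec G115 marker in generateExecutionName above: G115 is gosec's integer-overflow-on-conversion check, and the uint64 FNV hash is intentionally folded into an int64 seed, where wraparound is harmless because the value only seeds a deterministic execution name. A rough sketch of the idea, assuming a plausible hash-input format and printing the seed instead of the real name encoding (common.GetExecutionName is not reproduced here):

package main

import (
	"fmt"
	"hash/fnv"
	"time"
)

// hashIdentifier mirrors the shape of the helper in shared.go; the exact
// format string used there is an assumption for illustration.
func hashIdentifier(project, domain, name string) uint64 {
	h := fnv.New64()
	_, _ = h.Write([]byte(fmt.Sprintf("%s:%s:%s", project, domain, name)))
	return h.Sum64()
}

func main() {
	kickoff := time.Date(2018, 11, 30, 20, 36, 28, 0, time.UTC)
	// Same launch plan + same kickoff tick => same seed => same generated name.
	seed := kickoff.UnixNano() + int64(hashIdentifier("foo", "bar", "baz")) // #nosec G115: wraparound is acceptable for a name seed
	fmt.Println("seed:", seed)
}

Determinism matters here because a re-delivered schedule tick regenerates the same execution name, so the duplicate create fails with AlreadyExists and is skipped, as the error handling later in this hunk shows.
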
name := generateExecutionName(launchPlan, kickoffTime) logger.Debugf(context.Background(), "generated name [%s] for scheduled execution with launch plan [%+v]", - name, launchPlan.Id) + name, launchPlan.GetId()) kickoffTimeProto, err := ptypes.TimestampProto(kickoffTime) if err != nil { // We expected that kickoff times are valid (in order for a scheduled event to fire). @@ -148,11 +149,11 @@ func (e *workflowExecutor) formulateExecutionCreateRequest( kickoffTime, err) } executionRequest := &admin.ExecutionCreateRequest{ - Project: launchPlan.Id.Project, - Domain: launchPlan.Id.Domain, + Project: launchPlan.GetId().GetProject(), + Domain: launchPlan.GetId().GetDomain(), Name: name, Spec: &admin.ExecutionSpec{ - LaunchPlan: launchPlan.Id, + LaunchPlan: launchPlan.GetId(), Metadata: &admin.ExecutionMetadata{ Mode: admin.ExecutionMetadata_SCHEDULED, ScheduledAt: kickoffTimeProto, @@ -208,8 +209,8 @@ func (e *workflowExecutor) run() error { } executionRequest := e.formulateExecutionCreateRequest(launchPlan, scheduledWorkflowExecutionRequest.KickoffTime) - ctx = contextutils.WithWorkflowID(ctx, fmt.Sprintf(workflowIdentifierFmt, executionRequest.Project, - executionRequest.Domain, executionRequest.Name)) + ctx = contextutils.WithWorkflowID(ctx, fmt.Sprintf(workflowIdentifierFmt, executionRequest.GetProject(), + executionRequest.GetDomain(), executionRequest.GetName())) err = e.resolveKickoffTimeArg(scheduledWorkflowExecutionRequest, launchPlan, executionRequest) if err != nil { e.metrics.FailedResolveKickoffTimeArg.Inc() @@ -228,12 +229,12 @@ func (e *workflowExecutor) run() error { if ok && ec.Code() != codes.AlreadyExists { e.metrics.FailedKickoffExecution.Inc() logger.Errorf(context.Background(), "failed to execute scheduled workflow [%s:%s:%s] with err: %v", - executionRequest.Project, executionRequest.Domain, executionRequest.Name, err) + executionRequest.GetProject(), executionRequest.GetDomain(), executionRequest.GetName(), err) continue } } else { logger.Debugf(context.Background(), "created scheduled workflow execution %+v with kickoff time %+v", - response.Id, scheduledWorkflowExecutionRequest.KickoffTime) + response.GetId(), scheduledWorkflowExecutionRequest.KickoffTime) } executionLaunchTime := time.Now() diff --git a/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go b/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go index f6fc9b9693..38f8afddbd 100644 --- a/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go +++ b/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go @@ -82,9 +82,9 @@ func TestResolveKickoffTimeArg(t *testing.T) { }, } executionRequest := &admin.ExecutionCreateRequest{ - Project: testIdentifier.Project, - Domain: testIdentifier.Domain, - Name: testIdentifier.Name, + Project: testIdentifier.GetProject(), + Domain: testIdentifier.GetDomain(), + Name: testIdentifier.GetName(), Inputs: &core.LiteralMap{ Literals: map[string]*core.Literal{}, }, @@ -92,9 +92,9 @@ func TestResolveKickoffTimeArg(t *testing.T) { testExecutor := newWorkflowExecutorForTest(nil, nil, nil) err := testExecutor.resolveKickoffTimeArg(scheduleRequest, launchPlan, executionRequest) assert.Nil(t, err) - assert.Contains(t, executionRequest.Inputs.Literals, testKickoffTime) + assert.Contains(t, executionRequest.GetInputs().GetLiterals(), testKickoffTime) assert.Equal(t, testKickoffTimeProtoLiteral, - executionRequest.Inputs.Literals[testKickoffTime]) + executionRequest.GetInputs().GetLiterals()[testKickoffTime]) } func TestResolveKickoffTimeArg_NoKickoffTimeArg(t 
*testing.T) { @@ -112,9 +112,9 @@ func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) { }, } executionRequest := &admin.ExecutionCreateRequest{ - Project: testIdentifier.Project, - Domain: testIdentifier.Domain, - Name: testIdentifier.Name, + Project: testIdentifier.GetProject(), + Domain: testIdentifier.GetDomain(), + Name: testIdentifier.GetName(), Inputs: &core.LiteralMap{ Literals: map[string]*core.Literal{}, }, @@ -122,7 +122,7 @@ func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) { testExecutor := newWorkflowExecutorForTest(nil, nil, nil) err := testExecutor.resolveKickoffTimeArg(scheduleRequest, launchPlan, executionRequest) assert.Nil(t, err) - assert.NotContains(t, executionRequest.Inputs.Literals, testKickoffTime) + assert.NotContains(t, executionRequest.GetInputs().GetLiterals(), testKickoffTime) } func TestGetActiveLaunchPlanVersion(t *testing.T) { @@ -132,9 +132,9 @@ func TestGetActiveLaunchPlanVersion(t *testing.T) { Name: "name", } launchPlanIdentifier := core.Identifier{ - Project: launchPlanNamedIdentifier.Project, - Domain: launchPlanNamedIdentifier.Domain, - Name: launchPlanNamedIdentifier.Name, + Project: launchPlanNamedIdentifier.GetProject(), + Domain: launchPlanNamedIdentifier.GetDomain(), + Name: launchPlanNamedIdentifier.GetName(), Version: "foo", } @@ -142,9 +142,9 @@ func TestGetActiveLaunchPlanVersion(t *testing.T) { launchPlanManager.(*mocks.MockLaunchPlanManager).SetListLaunchPlansCallback( func(ctx context.Context, request *admin.ResourceListRequest) ( *admin.LaunchPlanList, error) { - assert.True(t, proto.Equal(launchPlanNamedIdentifier, request.Id)) - assert.Equal(t, "eq(state,1)", request.Filters) - assert.Equal(t, uint32(1), request.Limit) + assert.True(t, proto.Equal(launchPlanNamedIdentifier, request.GetId())) + assert.Equal(t, "eq(state,1)", request.GetFilters()) + assert.Equal(t, uint32(1), request.GetLimit()) return &admin.LaunchPlanList{ LaunchPlans: []*admin.LaunchPlan{ { @@ -156,7 +156,7 @@ func TestGetActiveLaunchPlanVersion(t *testing.T) { testExecutor := newWorkflowExecutorForTest(nil, nil, launchPlanManager) launchPlan, err := testExecutor.getActiveLaunchPlanVersion(launchPlanNamedIdentifier) assert.Nil(t, err) - assert.True(t, proto.Equal(&launchPlanIdentifier, launchPlan.Id)) + assert.True(t, proto.Equal(&launchPlanIdentifier, launchPlan.GetId())) } func TestGetActiveLaunchPlanVersion_ManagerError(t *testing.T) { @@ -198,13 +198,13 @@ func TestFormulateExecutionCreateRequest(t *testing.T) { } testExecutor := newWorkflowExecutorForTest(nil, nil, nil) executionRequest := testExecutor.formulateExecutionCreateRequest(launchPlan, time.Unix(1543607788, 0)) - assert.Equal(t, "foo", executionRequest.Project) - assert.Equal(t, "bar", executionRequest.Domain) - assert.Equal(t, "a2k4s9v5j246kwmdmh4t", executionRequest.Name) + assert.Equal(t, "foo", executionRequest.GetProject()) + assert.Equal(t, "bar", executionRequest.GetDomain()) + assert.Equal(t, "a2k4s9v5j246kwmdmh4t", executionRequest.GetName()) - assert.True(t, proto.Equal(&launchPlanIdentifier, executionRequest.Spec.LaunchPlan)) - assert.Equal(t, admin.ExecutionMetadata_SCHEDULED, executionRequest.Spec.Metadata.Mode) - assert.Equal(t, int64(1543607788), executionRequest.Spec.Metadata.ScheduledAt.Seconds) + assert.True(t, proto.Equal(&launchPlanIdentifier, executionRequest.GetSpec().GetLaunchPlan())) + assert.Equal(t, admin.ExecutionMetadata_SCHEDULED, executionRequest.GetSpec().GetMetadata().GetMode()) + assert.Equal(t, int64(1543607788), 
executionRequest.GetSpec().GetMetadata().GetScheduledAt().GetSeconds()) } func TestRun(t *testing.T) { @@ -234,12 +234,12 @@ func TestRun(t *testing.T) { testExecutionManager.SetCreateCallback(func( ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) ( *admin.ExecutionCreateResponse, error) { - assert.Equal(t, "project", request.Project) - assert.Equal(t, "domain", request.Domain) - assert.Equal(t, "ar8fphnlc5wh9dksjncj", request.Name) + assert.Equal(t, "project", request.GetProject()) + assert.Equal(t, "domain", request.GetDomain()) + assert.Equal(t, "ar8fphnlc5wh9dksjncj", request.GetName()) if messagesSeen == 0 { - assert.Contains(t, request.Inputs.Literals, testKickoffTime) - assert.Equal(t, testKickoffTimeProtoLiteral, request.Inputs.Literals[testKickoffTime]) + assert.Contains(t, request.GetInputs().GetLiterals(), testKickoffTime) + assert.Equal(t, testKickoffTimeProtoLiteral, request.GetInputs().GetLiterals()[testKickoffTime]) } messagesSeen++ return &admin.ExecutionCreateResponse{}, nil @@ -248,10 +248,10 @@ func TestRun(t *testing.T) { launchPlanManager.(*mocks.MockLaunchPlanManager).SetListLaunchPlansCallback( func(ctx context.Context, request *admin.ResourceListRequest) ( *admin.LaunchPlanList, error) { - assert.Equal(t, "project", request.Id.Project) - assert.Equal(t, "domain", request.Id.Domain) - assert.Equal(t, "eq(state,1)", request.Filters) - assert.Equal(t, uint32(1), request.Limit) + assert.Equal(t, "project", request.GetId().GetProject()) + assert.Equal(t, "domain", request.GetId().GetDomain()) + assert.Equal(t, "eq(state,1)", request.GetFilters()) + assert.Equal(t, uint32(1), request.GetLimit()) return &admin.LaunchPlanList{ LaunchPlans: []*admin.LaunchPlan{ { diff --git a/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go b/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go index fb9aebe34e..244cd0b40e 100644 --- a/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go +++ b/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go @@ -22,9 +22,9 @@ func (s *MockEventScheduler) CreateScheduleInput(ctx context.Context, appConfig payload, _ := aws.SerializeScheduleWorkflowPayload( schedule.GetKickoffTimeInputArg(), &admin.NamedEntityIdentifier{ - Project: identifier.Project, - Domain: identifier.Domain, - Name: identifier.Name, + Project: identifier.GetProject(), + Domain: identifier.GetDomain(), + Name: identifier.GetName(), }) return interfaces.AddScheduleInput{Identifier: identifier, ScheduleExpression: schedule, Payload: payload}, nil } diff --git a/flyteadmin/pkg/clusterresource/controller.go b/flyteadmin/pkg/clusterresource/controller.go index 6ea1731909..37cfa46054 100644 --- a/flyteadmin/pkg/clusterresource/controller.go +++ b/flyteadmin/pkg/clusterresource/controller.go @@ -209,8 +209,8 @@ func (c *controller) getCustomTemplateValues( collectedErrs = append(collectedErrs, err) } } - if attributes != nil && attributes.Attributes != nil { - for templateKey, templateValue := range attributes.Attributes { + if attributes != nil && attributes.GetAttributes() != nil { + for templateKey, templateValue := range attributes.GetAttributes() { customTemplateValues[fmt.Sprintf(templateVariableFormat, templateKey)] = templateValue } } @@ -481,8 +481,8 @@ func (c *controller) createResourceFromTemplate(ctx context.Context, templateDir // First, add the special case namespace template which is always substituted by the system // rather than fetched via a user-specified source. 
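
Note on createResourceFromTemplate above: the namespace, project.GetId() and domain.GetId() values are injected as template variables and then substituted throughout the manifest, alongside any custom per-domain values. A small illustrative sketch, assuming a "{{ name }}" placeholder style (the real templateVariableFormat constant is not shown in this diff):

package main

import (
	"fmt"
	"strings"
)

func main() {
	manifest := "apiVersion: v1\nkind: Namespace\nmetadata:\n  name: {{ namespace }}\n  labels:\n    project: {{ project }}\n    domain: {{ domain }}\n"

	templateValues := map[string]string{
		"{{ namespace }}": "flytesnacks-development",
		"{{ project }}":   "flytesnacks", // project.GetId()
		"{{ domain }}":    "development", // domain.GetId()
	}

	// Mirrors the substitution loop: every placeholder is replaced wherever it
	// appears in the rendered Kubernetes manifest.
	for key, value := range templateValues {
		manifest = strings.ReplaceAll(manifest, key, value)
	}
	fmt.Print(manifest)
}
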
templateValues[fmt.Sprintf(templateVariableFormat, namespaceVariable)] = namespace - templateValues[fmt.Sprintf(templateVariableFormat, projectVariable)] = project.Id - templateValues[fmt.Sprintf(templateVariableFormat, domainVariable)] = domain.Id + templateValues[fmt.Sprintf(templateVariableFormat, projectVariable)] = project.GetId() + templateValues[fmt.Sprintf(templateVariableFormat, domainVariable)] = domain.GetId() var k8sManifest = string(template) for templateKey, templateValue := range customTemplateValues { @@ -587,11 +587,11 @@ func (c *controller) Sync(ctx context.Context) error { stats := ResourceSyncStats{} - for _, project := range projects.Projects { - for _, domain := range project.Domains { - namespace := common.GetNamespaceName(c.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), project.Id, domain.Name) + for _, project := range projects.GetProjects() { + for _, domain := range project.GetDomains() { + namespace := common.GetNamespaceName(c.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), project.GetId(), domain.GetName()) customTemplateValues, err := c.getCustomTemplateValues( - ctx, project.Id, domain.Id, domainTemplateValues[domain.Id]) + ctx, project.GetId(), domain.GetId(), domainTemplateValues[domain.GetId()]) if err != nil { logger.Errorf(ctx, "Failed to get custom template values for %s with err: %v", namespace, err) errs = append(errs, err) diff --git a/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider.go b/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider.go index 550637183d..16113edd23 100644 --- a/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider.go +++ b/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider.go @@ -25,9 +25,9 @@ func (p serviceAdminProvider) GetClusterResourceAttributes(ctx context.Context, if err != nil { return nil, err } - if resource != nil && resource.Attributes != nil && resource.Attributes.MatchingAttributes != nil && - resource.Attributes.MatchingAttributes.GetClusterResourceAttributes() != nil { - return resource.Attributes.MatchingAttributes.GetClusterResourceAttributes(), nil + if resource != nil && resource.GetAttributes() != nil && resource.GetAttributes().GetMatchingAttributes() != nil && + resource.GetAttributes().GetMatchingAttributes().GetClusterResourceAttributes() != nil { + return resource.GetAttributes().GetMatchingAttributes().GetClusterResourceAttributes(), nil } return nil, NewMissingEntityError("cluster resource attributes") } @@ -56,11 +56,11 @@ func (p serviceAdminProvider) GetProjects(ctx context.Context) (*admin.Projects, if err != nil { return nil, err } - projects = append(projects, projectResp.Projects...) - if len(projectResp.Token) == 0 { + projects = append(projects, projectResp.GetProjects()...) 
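
Note on GetProjects above: it pages through the admin API with an opaque token until the response token comes back empty. A self-contained sketch of that loop against a stand-in, in-memory pager (the real call is AdminServiceClient.ListProjects; the page contents below are made up):

package main

import "fmt"

type page struct {
	Items []string
	Token string
}

// list returns one page per call; an empty token on the response means "no more pages".
func list(token string) page {
	pages := map[string]page{
		"":   {Items: []string{"flytesnacks", "flyteexamples"}, Token: "p2"},
		"p2": {Items: []string{"flytetester"}, Token: ""},
	}
	return pages[token]
}

func main() {
	var all []string
	token := ""
	for {
		resp := list(token)
		all = append(all, resp.Items...)
		if len(resp.Token) == 0 { // same termination check as len(projectResp.GetToken()) == 0
			break
		}
		token = resp.Token
	}
	fmt.Println(all)
}
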
+ if len(projectResp.GetToken()) == 0 { break } - listReq.Token = projectResp.Token + listReq.Token = projectResp.GetToken() } return &admin.Projects{ Projects: projects, diff --git a/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider_test.go b/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider_test.go index 182c9e2573..0bd1390f50 100644 --- a/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider_test.go +++ b/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider_test.go @@ -24,7 +24,7 @@ func TestServiceGetClusterResourceAttributes(t *testing.T) { } mockAdmin := mocks.AdminServiceClient{} mockAdmin.OnGetProjectDomainAttributesMatch(ctx, mock.MatchedBy(func(req *admin.ProjectDomainAttributesGetRequest) bool { - return req.Project == project && req.Domain == domain && req.ResourceType == admin.MatchableResource_CLUSTER_RESOURCE + return req.GetProject() == project && req.GetDomain() == domain && req.GetResourceType() == admin.MatchableResource_CLUSTER_RESOURCE })).Return(&admin.ProjectDomainAttributesGetResponse{ Attributes: &admin.ProjectDomainAttributes{ MatchingAttributes: &admin.MatchingAttributes{ @@ -42,12 +42,12 @@ func TestServiceGetClusterResourceAttributes(t *testing.T) { } attrs, err := provider.GetClusterResourceAttributes(context.TODO(), project, domain) assert.NoError(t, err) - assert.EqualValues(t, attrs.Attributes, attributes) + assert.EqualValues(t, attrs.GetAttributes(), attributes) }) t.Run("admin service error", func(t *testing.T) { mockAdmin := mocks.AdminServiceClient{} mockAdmin.OnGetProjectDomainAttributesMatch(ctx, mock.MatchedBy(func(req *admin.ProjectDomainAttributesGetRequest) bool { - return req.Project == project && req.Domain == domain && req.ResourceType == admin.MatchableResource_CLUSTER_RESOURCE + return req.GetProject() == project && req.GetDomain() == domain && req.GetResourceType() == admin.MatchableResource_CLUSTER_RESOURCE })).Return(&admin.ProjectDomainAttributesGetResponse{}, errFoo) provider := serviceAdminProvider{ @@ -59,7 +59,7 @@ func TestServiceGetClusterResourceAttributes(t *testing.T) { t.Run("wonky admin service response", func(t *testing.T) { mockAdmin := mocks.AdminServiceClient{} mockAdmin.OnGetProjectDomainAttributesMatch(ctx, mock.MatchedBy(func(req *admin.ProjectDomainAttributesGetRequest) bool { - return req.Project == project && req.Domain == domain && req.ResourceType == admin.MatchableResource_CLUSTER_RESOURCE + return req.GetProject() == project && req.GetDomain() == domain && req.GetResourceType() == admin.MatchableResource_CLUSTER_RESOURCE })).Return(&admin.ProjectDomainAttributesGetResponse{ Attributes: &admin.ProjectDomainAttributes{ MatchingAttributes: &admin.MatchingAttributes{ @@ -88,7 +88,7 @@ func TestServiceGetProjects(t *testing.T) { t.Run("happy case", func(t *testing.T) { mockAdmin := mocks.AdminServiceClient{} mockAdmin.OnListProjectsMatch(ctx, mock.MatchedBy(func(req *admin.ProjectListRequest) bool { - return req.Limit == 100 && req.Filters == "ne(state,1)" && req.SortBy.Key == "created_at" + return req.GetLimit() == 100 && req.GetFilters() == "ne(state,1)" && req.GetSortBy().GetKey() == "created_at" })).Return(&admin.Projects{ Projects: []*admin.Project{ { @@ -104,12 +104,12 @@ func TestServiceGetProjects(t *testing.T) { } projects, err := provider.GetProjects(ctx) assert.NoError(t, err) - assert.Len(t, projects.Projects, 2) + assert.Len(t, projects.GetProjects(), 2) }) t.Run("admin error", func(t *testing.T) { mockAdmin := mocks.AdminServiceClient{} 
mockAdmin.OnListProjectsMatch(ctx, mock.MatchedBy(func(req *admin.ProjectListRequest) bool { - return req.Limit == 100 && req.Filters == "ne(state,1)" && req.SortBy.Key == "created_at" + return req.GetLimit() == 100 && req.GetFilters() == "ne(state,1)" && req.GetSortBy().GetKey() == "created_at" })).Return(nil, errFoo) provider := serviceAdminProvider{ adminClient: &mockAdmin, diff --git a/flyteadmin/pkg/clusterresource/impl/db_admin_data_provider_test.go b/flyteadmin/pkg/clusterresource/impl/db_admin_data_provider_test.go index 81ba4805ba..7fa0039799 100644 --- a/flyteadmin/pkg/clusterresource/impl/db_admin_data_provider_test.go +++ b/flyteadmin/pkg/clusterresource/impl/db_admin_data_provider_test.go @@ -49,7 +49,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { } attrs, err := provider.GetClusterResourceAttributes(context.TODO(), project, domain) assert.NoError(t, err) - assert.EqualValues(t, attrs.Attributes, attributes) + assert.EqualValues(t, attrs.GetAttributes(), attributes) }) t.Run("error", func(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request interfaces.ResourceRequest) (*interfaces.ResourceResponse, error) { @@ -125,7 +125,7 @@ func TestGetProjects(t *testing.T) { } projects, err := provider.GetProjects(context.TODO()) assert.NoError(t, err) - assert.Len(t, projects.Projects, 2) + assert.Len(t, projects.GetProjects(), 2) }) t.Run("db error", func(t *testing.T) { mockRepo := repoMocks.NewMockRepository() diff --git a/flyteadmin/pkg/common/flyte_url.go b/flyteadmin/pkg/common/flyte_url.go index f5245ac238..e4c005d902 100644 --- a/flyteadmin/pkg/common/flyte_url.go +++ b/flyteadmin/pkg/common/flyte_url.go @@ -108,7 +108,7 @@ func ParseFlyteURLToExecution(flyteURL string) (ParsedExecution, error) { taskExecID := core.TaskExecutionIdentifier{ NodeExecutionId: &nodeExecID, // checking for overflow here is probably unreasonable - RetryAttempt: uint32(a), + RetryAttempt: uint32(a), // #nosec G115 } return ParsedExecution{ PartialTaskExecID: &taskExecID, @@ -126,8 +126,8 @@ func ParseFlyteURLToExecution(flyteURL string) (ParsedExecution, error) { } func FlyteURLsFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifier, deck bool) *admin.FlyteURLs { - base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s", nodeExecutionID.ExecutionId.Project, - nodeExecutionID.ExecutionId.Domain, nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId) + base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s", nodeExecutionID.GetExecutionId().GetProject(), + nodeExecutionID.GetExecutionId().GetDomain(), nodeExecutionID.GetExecutionId().GetName(), nodeExecutionID.GetNodeId()) res := &admin.FlyteURLs{ Inputs: fmt.Sprintf("%s/%s", base, ArtifactTypeI), @@ -143,7 +143,7 @@ func FlyteURLsFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifier, // This constructs a fully unique prefix, and when post-pended with the output name, forms a fully unique name for // the artifact service (including the project/domain of course, which the artifact service will add). func FlyteURLKeyFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifier) string { - res := fmt.Sprintf("%s/%s", nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId) + res := fmt.Sprintf("%s/%s", nodeExecutionID.GetExecutionId().GetName(), nodeExecutionID.GetNodeId()) return res } @@ -151,14 +151,14 @@ func FlyteURLKeyFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifie // FlyteURLKeyFromNodeExecutionIDRetry is a modified version of the function above. // See the uniqueness comment above. 
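
Note on the flyte:// helpers above: FlyteURLsFromNodeExecutionID builds a flyte://v1/{project}/{domain}/{name}/{node} base and appends a per-artifact suffix, while the Key variants produce a compact {name}/{node}[/{retry}] string for the artifact store (which adds project/domain itself). A short sketch of both shapes; the suffix letters are assumed from the parser tests below ("i" for inputs, "o" for outputs) rather than copied from the ArtifactType constants:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	project, domain, execName, nodeID, retry := "fs", "dev", "abc", "n0", 3

	// FlyteURLsFromNodeExecutionID-style base URL plus artifact suffixes.
	base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s", project, domain, execName, nodeID)
	fmt.Println("inputs: ", base+"/i")
	fmt.Println("outputs:", base+"/o")

	// FlyteURLKeyFromNodeExecutionIDRetry-style key: unique per execution/node/attempt.
	key := fmt.Sprintf("%s/%s/%s", execName, nodeID, strconv.Itoa(retry))
	fmt.Println("key:    ", key)
}
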
func FlyteURLKeyFromNodeExecutionIDRetry(nodeExecutionID *core.NodeExecutionIdentifier, retry int) string { - res := fmt.Sprintf("%s/%s/%s", nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId, strconv.Itoa(retry)) + res := fmt.Sprintf("%s/%s/%s", nodeExecutionID.GetExecutionId().GetName(), nodeExecutionID.GetNodeId(), strconv.Itoa(retry)) return res } func FlyteURLsFromTaskExecutionID(taskExecutionID *core.TaskExecutionIdentifier, deck bool) *admin.FlyteURLs { - base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s/%s", taskExecutionID.NodeExecutionId.ExecutionId.Project, - taskExecutionID.NodeExecutionId.ExecutionId.Domain, taskExecutionID.NodeExecutionId.ExecutionId.Name, taskExecutionID.NodeExecutionId.NodeId, strconv.Itoa(int(taskExecutionID.RetryAttempt))) + base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s/%s", taskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject(), + taskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain(), taskExecutionID.GetNodeExecutionId().GetExecutionId().GetName(), taskExecutionID.GetNodeExecutionId().GetNodeId(), strconv.Itoa(int(taskExecutionID.GetRetryAttempt()))) res := &admin.FlyteURLs{ Inputs: fmt.Sprintf("%s/%s", base, ArtifactTypeI), diff --git a/flyteadmin/pkg/common/flyte_url_test.go b/flyteadmin/pkg/common/flyte_url_test.go index a0cbfcda2b..bd954c5bb6 100644 --- a/flyteadmin/pkg/common/flyte_url_test.go +++ b/flyteadmin/pkg/common/flyte_url_test.go @@ -197,11 +197,11 @@ func TestParseFlyteURLToExecution(t *testing.T) { x, err := ParseFlyteURLToExecution("flyte://v1/fs/dev/abc/n0/3/o/o0") assert.NoError(t, err) assert.Nil(t, x.NodeExecID) - assert.Nil(t, x.PartialTaskExecID.TaskId) - assert.Equal(t, "fs", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Project) - assert.Equal(t, "dev", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Domain) - assert.Equal(t, "abc", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Name) - assert.Equal(t, "n0", x.PartialTaskExecID.NodeExecutionId.NodeId) + assert.Nil(t, x.PartialTaskExecID.GetTaskId()) + assert.Equal(t, "fs", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetProject()) + assert.Equal(t, "dev", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetDomain()) + assert.Equal(t, "abc", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetName()) + assert.Equal(t, "n0", x.PartialTaskExecID.GetNodeExecutionId().GetNodeId()) assert.Equal(t, uint32(3), x.PartialTaskExecID.GetRetryAttempt()) assert.Equal(t, "o0", x.LiteralName) }) @@ -210,11 +210,11 @@ func TestParseFlyteURLToExecution(t *testing.T) { x, err := ParseFlyteURLToExecution("flyte://v1/fs/dev/abc/n0/3/o") assert.NoError(t, err) assert.Nil(t, x.NodeExecID) - assert.Nil(t, x.PartialTaskExecID.TaskId) - assert.Equal(t, "fs", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Project) - assert.Equal(t, "dev", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Domain) - assert.Equal(t, "abc", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Name) - assert.Equal(t, "n0", x.PartialTaskExecID.NodeExecutionId.NodeId) + assert.Nil(t, x.PartialTaskExecID.GetTaskId()) + assert.Equal(t, "fs", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetProject()) + assert.Equal(t, "dev", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetDomain()) + assert.Equal(t, "abc", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetName()) + assert.Equal(t, "n0", x.PartialTaskExecID.GetNodeExecutionId().GetNodeId()) assert.Equal(t, uint32(3), x.PartialTaskExecID.GetRetryAttempt()) assert.Equal(t, "", x.LiteralName) }) @@ 
-224,10 +224,10 @@ func TestParseFlyteURLToExecution(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, x.NodeExecID) assert.Nil(t, x.PartialTaskExecID) - assert.Equal(t, "fs", x.NodeExecID.ExecutionId.Project) - assert.Equal(t, "dev", x.NodeExecID.ExecutionId.Domain) - assert.Equal(t, "abc", x.NodeExecID.ExecutionId.Name) - assert.Equal(t, "n0", x.NodeExecID.NodeId) + assert.Equal(t, "fs", x.NodeExecID.GetExecutionId().GetProject()) + assert.Equal(t, "dev", x.NodeExecID.GetExecutionId().GetDomain()) + assert.Equal(t, "abc", x.NodeExecID.GetExecutionId().GetName()) + assert.Equal(t, "n0", x.NodeExecID.GetNodeId()) assert.Equal(t, "o0", x.LiteralName) }) @@ -236,10 +236,10 @@ func TestParseFlyteURLToExecution(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, x.NodeExecID) assert.Nil(t, x.PartialTaskExecID) - assert.Equal(t, "fs", x.NodeExecID.ExecutionId.Project) - assert.Equal(t, "dev", x.NodeExecID.ExecutionId.Domain) - assert.Equal(t, "abc", x.NodeExecID.ExecutionId.Name) - assert.Equal(t, "n0", x.NodeExecID.NodeId) + assert.Equal(t, "fs", x.NodeExecID.GetExecutionId().GetProject()) + assert.Equal(t, "dev", x.NodeExecID.GetExecutionId().GetDomain()) + assert.Equal(t, "abc", x.NodeExecID.GetExecutionId().GetName()) + assert.Equal(t, "n0", x.NodeExecID.GetNodeId()) assert.Equal(t, "", x.LiteralName) }) @@ -248,10 +248,10 @@ func TestParseFlyteURLToExecution(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, x.NodeExecID) assert.Nil(t, x.PartialTaskExecID) - assert.Equal(t, "fs", x.NodeExecID.ExecutionId.Project) - assert.Equal(t, "dev", x.NodeExecID.ExecutionId.Domain) - assert.Equal(t, "abc", x.NodeExecID.ExecutionId.Name) - assert.Equal(t, "n0", x.NodeExecID.NodeId) + assert.Equal(t, "fs", x.NodeExecID.GetExecutionId().GetProject()) + assert.Equal(t, "dev", x.NodeExecID.GetExecutionId().GetDomain()) + assert.Equal(t, "abc", x.NodeExecID.GetExecutionId().GetName()) + assert.Equal(t, "n0", x.NodeExecID.GetNodeId()) assert.Equal(t, "", x.LiteralName) assert.Equal(t, ArtifactTypeI, x.IOType) }) @@ -261,10 +261,10 @@ func TestParseFlyteURLToExecution(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, x.NodeExecID) assert.Nil(t, x.PartialTaskExecID) - assert.Equal(t, "fs", x.NodeExecID.ExecutionId.Project) - assert.Equal(t, "dev", x.NodeExecID.ExecutionId.Domain) - assert.Equal(t, "abc", x.NodeExecID.ExecutionId.Name) - assert.Equal(t, "n0", x.NodeExecID.NodeId) + assert.Equal(t, "fs", x.NodeExecID.GetExecutionId().GetProject()) + assert.Equal(t, "dev", x.NodeExecID.GetExecutionId().GetDomain()) + assert.Equal(t, "abc", x.NodeExecID.GetExecutionId().GetName()) + assert.Equal(t, "n0", x.NodeExecID.GetNodeId()) assert.Equal(t, "", x.LiteralName) assert.Equal(t, ArtifactTypeD, x.IOType) }) diff --git a/flyteadmin/pkg/common/sorting.go b/flyteadmin/pkg/common/sorting.go index c89b86a914..246c73b52c 100644 --- a/flyteadmin/pkg/common/sorting.go +++ b/flyteadmin/pkg/common/sorting.go @@ -30,13 +30,13 @@ func NewSortParameter(sort *admin.Sort, allowed sets.String) (SortParameter, err return nil, nil } - key := sort.Key + key := sort.GetKey() if !allowed.Has(key) { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid sort key '%s'", key) } var gormOrderExpression string - switch sort.Direction { + switch sort.GetDirection() { case admin.Sort_DESCENDING: gormOrderExpression = fmt.Sprintf(gormDescending, key) case admin.Sort_ASCENDING: diff --git a/flyteadmin/pkg/config/config.go b/flyteadmin/pkg/config/config.go index f6bdd27141..0e63eccb45 100644 --- 
a/flyteadmin/pkg/config/config.go +++ b/flyteadmin/pkg/config/config.go @@ -66,10 +66,13 @@ type KubeClientConfig struct { } type ServerSecurityOptions struct { - Secure bool `json:"secure"` - Ssl SslOptions `json:"ssl"` - UseAuth bool `json:"useAuth"` - AuditAccess bool `json:"auditAccess"` + Secure bool `json:"secure"` + Ssl SslOptions `json:"ssl"` + UseAuth bool `json:"useAuth"` + // InsecureCookieHeader should only be set in the case where we want to serve cookies with the header "Secure" set to false. + // This is useful for local development and *never* in production. + InsecureCookieHeader bool `json:"insecureCookieHeader"` + AuditAccess bool `json:"auditAccess"` // These options are here to allow deployments where the Flyte UI (Console) is served from a different domain/port. // Note that CORS only applies to Admin's API endpoints. The health check endpoint for instance is unaffected. diff --git a/flyteadmin/pkg/config/serverconfig_flags.go b/flyteadmin/pkg/config/serverconfig_flags.go index 10229a458a..09a5d70a26 100755 --- a/flyteadmin/pkg/config/serverconfig_flags.go +++ b/flyteadmin/pkg/config/serverconfig_flags.go @@ -59,6 +59,7 @@ func (cfg ServerConfig) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.String(fmt.Sprintf("%v%v", prefix, "security.ssl.certificateFile"), defaultServerConfig.Security.Ssl.CertificateFile, "") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "security.ssl.keyFile"), defaultServerConfig.Security.Ssl.KeyFile, "") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "security.useAuth"), defaultServerConfig.Security.UseAuth, "") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "security.insecureCookieHeader"), defaultServerConfig.Security.InsecureCookieHeader, "") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "security.auditAccess"), defaultServerConfig.Security.AuditAccess, "") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "security.allowCors"), defaultServerConfig.Security.AllowCors, "") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "security.allowedOrigins"), defaultServerConfig.Security.AllowedOrigins, "") diff --git a/flyteadmin/pkg/config/serverconfig_flags_test.go b/flyteadmin/pkg/config/serverconfig_flags_test.go index 6a95336f40..a18b56156e 100755 --- a/flyteadmin/pkg/config/serverconfig_flags_test.go +++ b/flyteadmin/pkg/config/serverconfig_flags_test.go @@ -225,6 +225,20 @@ func TestServerConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_security.insecureCookieHeader", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("security.insecureCookieHeader", testValue) + if vBool, err := cmdFlags.GetBool("security.insecureCookieHeader"); err == nil { + testDecodeJson_ServerConfig(t, fmt.Sprintf("%v", vBool), &actual.Security.InsecureCookieHeader) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) t.Run("Test_security.auditAccess", func(t *testing.T) { t.Run("Override", func(t *testing.T) { diff --git a/flyteadmin/pkg/data/implementations/aws_remote_url_test.go b/flyteadmin/pkg/data/implementations/aws_remote_url_test.go index 878351fcf2..0677a498a3 100644 --- a/flyteadmin/pkg/data/implementations/aws_remote_url_test.go +++ b/flyteadmin/pkg/data/implementations/aws_remote_url_test.go @@ -80,6 +80,6 @@ func TestAWSGet(t *testing.T) { } urlBlob, err := remoteURL.Get(context.Background(), "s3://bucket/key") assert.Nil(t, err) - assert.Equal(t, "www://host/path", urlBlob.Url) - assert.Equal(t, contentLength, urlBlob.Bytes) + assert.Equal(t, "www://host/path", urlBlob.GetUrl()) + assert.Equal(t, contentLength, 
urlBlob.GetBytes()) } diff --git a/flyteadmin/pkg/data/implementations/gcp_remote_url.go b/flyteadmin/pkg/data/implementations/gcp_remote_url.go index 3a8dc98679..56fe7412e7 100644 --- a/flyteadmin/pkg/data/implementations/gcp_remote_url.go +++ b/flyteadmin/pkg/data/implementations/gcp_remote_url.go @@ -110,7 +110,7 @@ func (g *GCPRemoteURL) signURL(ctx context.Context, gcsURI GCPGCSObject) (string if err != nil { return nil, err } - return resp.SignedBlob, nil + return resp.GetSignedBlob(), nil }, Expires: time.Now().Add(g.signDuration), } @@ -159,8 +159,8 @@ func (ts impersonationTokenSource) Token() (*oauth2.Token, error) { } return &oauth2.Token{ - AccessToken: resp.AccessToken, - Expiry: asTime(resp.ExpireTime), + AccessToken: resp.GetAccessToken(), + Expiry: asTime(resp.GetExpireTime()), }, nil } diff --git a/flyteadmin/pkg/data/implementations/gcp_remote_url_test.go b/flyteadmin/pkg/data/implementations/gcp_remote_url_test.go index cfcce1ff5a..a1156518e6 100644 --- a/flyteadmin/pkg/data/implementations/gcp_remote_url_test.go +++ b/flyteadmin/pkg/data/implementations/gcp_remote_url_test.go @@ -88,7 +88,7 @@ func TestGCPGet(t *testing.T) { mockIAMCredentials := mockIAMCredentialsImpl{} mockIAMCredentials.signBlobFunc = func(ctx context.Context, req *credentialspb.SignBlobRequest, opts ...gax.CallOption) (*credentialspb.SignBlobResponse, error) { - assert.Equal(t, "projects/-/serviceAccounts/"+signingPrincipal, req.Name) + assert.Equal(t, "projects/-/serviceAccounts/"+signingPrincipal, req.GetName()) return &credentialspb.SignBlobResponse{SignedBlob: []byte(signedBlob)}, nil } @@ -102,12 +102,12 @@ func TestGCPGet(t *testing.T) { urlBlob, err := remoteURL.Get(context.Background(), "gs://bucket/key") assert.Nil(t, err) - u, _ := url.Parse(urlBlob.Url) + u, _ := url.Parse(urlBlob.GetUrl()) assert.Equal(t, "https", u.Scheme) assert.Equal(t, "storage.googleapis.com", u.Hostname()) assert.Equal(t, "/bucket/key", u.Path) assert.Equal(t, encodedSignedBlob, u.Query().Get("Signature")) - assert.Equal(t, int64(100), urlBlob.Bytes) + assert.Equal(t, int64(100), urlBlob.GetBytes()) } func TestToken(t *testing.T) { @@ -117,8 +117,8 @@ func TestToken(t *testing.T) { mockIAMCredentials := mockIAMCredentialsImpl{} mockIAMCredentials.generateAccessTokenFunc = func(ctx context.Context, req *credentialspb.GenerateAccessTokenRequest, opts ...gax.CallOption) (*credentialspb.GenerateAccessTokenResponse, error) { - assert.Equal(t, "projects/-/serviceAccounts/"+signingPrincipal, req.Name) - assert.Equal(t, []string{"https://www.googleapis.com/auth/devstorage.read_only"}, req.Scope) + assert.Equal(t, "projects/-/serviceAccounts/"+signingPrincipal, req.GetName()) + assert.Equal(t, []string{"https://www.googleapis.com/auth/devstorage.read_only"}, req.GetScope()) return &credentialspb.GenerateAccessTokenResponse{ AccessToken: token, ExpireTime: ×tamp, diff --git a/flyteadmin/pkg/data/implementations/noop_remote_url_test.go b/flyteadmin/pkg/data/implementations/noop_remote_url_test.go index 965dc9eeb2..c4e14a394a 100644 --- a/flyteadmin/pkg/data/implementations/noop_remote_url_test.go +++ b/flyteadmin/pkg/data/implementations/noop_remote_url_test.go @@ -44,6 +44,6 @@ func TestNoopRemoteURLGet(t *testing.T) { urlBlob, err := noopRemoteURL.Get(context.Background(), "uri") assert.Nil(t, err) assert.NotEmpty(t, urlBlob) - assert.Equal(t, "uri", urlBlob.Url) - assert.Equal(t, noopFileSize, urlBlob.Bytes) + assert.Equal(t, "uri", urlBlob.GetUrl()) + assert.Equal(t, noopFileSize, urlBlob.GetBytes()) } diff --git 
a/flyteadmin/pkg/errors/errors.go b/flyteadmin/pkg/errors/errors.go index 5fc48b0b67..8e280e11dd 100644 --- a/flyteadmin/pkg/errors/errors.go +++ b/flyteadmin/pkg/errors/errors.go @@ -91,7 +91,7 @@ func NewAlreadyInTerminalStateError(ctx context.Context, errorMsg string, curPha statusErr, transformationErr := NewFlyteAdminError(codes.FailedPrecondition, errorMsg).WithDetails(reason) if transformationErr != nil { logger.Panicf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr) - return NewFlyteAdminErrorf(codes.FailedPrecondition, errorMsg) + return NewFlyteAdminErrorf(codes.FailedPrecondition, errorMsg) //nolint } return statusErr } @@ -105,8 +105,8 @@ func NewIncompatibleClusterError(ctx context.Context, errorMsg, curCluster strin }, }) if transformationErr != nil { - logger.Panicf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr) - return NewFlyteAdminErrorf(codes.FailedPrecondition, errorMsg) + logger.Panicf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr) //nolint + return NewFlyteAdminErrorf(codes.FailedPrecondition, errorMsg) //nolint } return statusErr } @@ -128,23 +128,23 @@ func compareJsons(jsonArray1 jsondiff.Patch, jsonArray2 jsondiff.Patch) []string } func NewTaskExistsDifferentStructureError(ctx context.Context, request *admin.TaskCreateRequest, oldSpec *core.CompiledTask, newSpec *core.CompiledTask) FlyteAdminError { - errorMsg := fmt.Sprintf("%v task with different structure already exists. (Please register a new version of the task):\n", request.Id.Name) + errorMsg := fmt.Sprintf("%v task with different structure already exists. (Please register a new version of the task):\n", request.GetId().GetName()) diff, _ := jsondiff.Compare(oldSpec, newSpec) rdiff, _ := jsondiff.Compare(newSpec, oldSpec) rs := compareJsons(diff, rdiff) errorMsg += strings.Join(rs, "\n") - return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) + return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) //nolint } func NewTaskExistsIdenticalStructureError(ctx context.Context, request *admin.TaskCreateRequest) FlyteAdminError { errorMsg := "task with identical structure already exists" - return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) + return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) //nolint } func NewWorkflowExistsDifferentStructureError(ctx context.Context, request *admin.WorkflowCreateRequest, oldSpec *core.CompiledWorkflowClosure, newSpec *core.CompiledWorkflowClosure) FlyteAdminError { - errorMsg := fmt.Sprintf("%v workflow with different structure already exists. (Please register a new version of the workflow):\n", request.Id.Name) + errorMsg := fmt.Sprintf("%v workflow with different structure already exists. 
(Please register a new version of the workflow):\n", request.GetId().GetName()) diff, _ := jsondiff.Compare(oldSpec, newSpec) rdiff, _ := jsondiff.Compare(newSpec, oldSpec) rs := compareJsons(diff, rdiff) @@ -154,13 +154,13 @@ func NewWorkflowExistsDifferentStructureError(ctx context.Context, request *admi statusErr, transformationErr := NewFlyteAdminError(codes.InvalidArgument, errorMsg).WithDetails(&admin.CreateWorkflowFailureReason{ Reason: &admin.CreateWorkflowFailureReason_ExistsDifferentStructure{ ExistsDifferentStructure: &admin.WorkflowErrorExistsDifferentStructure{ - Id: request.Id, + Id: request.GetId(), }, }, }) if transformationErr != nil { logger.Errorf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr) - return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) + return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) //nolint } return statusErr } @@ -170,31 +170,31 @@ func NewWorkflowExistsIdenticalStructureError(ctx context.Context, request *admi statusErr, transformationErr := NewFlyteAdminError(codes.AlreadyExists, errorMsg).WithDetails(&admin.CreateWorkflowFailureReason{ Reason: &admin.CreateWorkflowFailureReason_ExistsIdenticalStructure{ ExistsIdenticalStructure: &admin.WorkflowErrorExistsIdenticalStructure{ - Id: request.Id, + Id: request.GetId(), }, }, }) if transformationErr != nil { logger.Errorf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr) - return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) + return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) //nolint } return statusErr } func NewLaunchPlanExistsDifferentStructureError(ctx context.Context, request *admin.LaunchPlanCreateRequest, oldSpec *admin.LaunchPlanSpec, newSpec *admin.LaunchPlanSpec) FlyteAdminError { - errorMsg := fmt.Sprintf("%v launch plan with different structure already exists. (Please register a new version of the launch plan):\n", request.Id.Name) + errorMsg := fmt.Sprintf("%v launch plan with different structure already exists. 
(Please register a new version of the launch plan):\n", request.GetId().GetName()) diff, _ := jsondiff.Compare(oldSpec, newSpec) rdiff, _ := jsondiff.Compare(newSpec, oldSpec) rs := compareJsons(diff, rdiff) errorMsg += strings.Join(rs, "\n") - return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) + return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) //nolint } func NewLaunchPlanExistsIdenticalStructureError(ctx context.Context, request *admin.LaunchPlanCreateRequest) FlyteAdminError { errorMsg := "launch plan with identical structure already exists" - return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) + return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) //nolint } func IsDoesNotExistError(err error) bool { @@ -209,12 +209,12 @@ func NewInactiveProjectError(ctx context.Context, id string) FlyteAdminError { }) if transformationErr != nil { logger.Errorf(ctx, "failed to wrap grpc status in type 'Error': %v", transformationErr) - return NewFlyteAdminErrorf(codes.InvalidArgument, errMsg) + return NewFlyteAdminErrorf(codes.InvalidArgument, errMsg) //nolint } return statusErr } func NewInvalidLiteralTypeError(name string, err error) FlyteAdminError { return NewFlyteAdminErrorf(codes.InvalidArgument, - fmt.Sprintf("Failed to validate literal type for [%s] with err: %s", name, err)) + fmt.Sprintf("Failed to validate literal type for [%s] with err: %s", name, err)) //nolint } diff --git a/flyteadmin/pkg/errors/errors_test.go b/flyteadmin/pkg/errors/errors_test.go index 18c76992b5..a72b4ce2eb 100644 --- a/flyteadmin/pkg/errors/errors_test.go +++ b/flyteadmin/pkg/errors/errors_test.go @@ -284,7 +284,7 @@ func TestNewLaunchPlanExistsDifferentStructureError(t *testing.T) { Id: &identifier, } - statusErr := NewLaunchPlanExistsDifferentStructureError(context.Background(), req, oldLaunchPlan.Spec, newLaunchPlan.Spec) + statusErr := NewLaunchPlanExistsDifferentStructureError(context.Background(), req, oldLaunchPlan.GetSpec(), newLaunchPlan.GetSpec()) assert.NotNil(t, statusErr) s, ok := status.FromError(statusErr) assert.True(t, ok) @@ -325,5 +325,5 @@ func TestNewInactiveProjectError(t *testing.T) { details, ok := statusErr.Details()[0].(*admin.InactiveProject) assert.True(t, ok) - assert.Equal(t, identifier.GetProject(), details.Id) + assert.Equal(t, identifier.GetProject(), details.GetId()) } diff --git a/flyteadmin/pkg/executioncluster/impl/in_cluster.go b/flyteadmin/pkg/executioncluster/impl/in_cluster.go index f06d1c4adf..2fdd8271e1 100644 --- a/flyteadmin/pkg/executioncluster/impl/in_cluster.go +++ b/flyteadmin/pkg/executioncluster/impl/in_cluster.go @@ -26,8 +26,8 @@ func (i InCluster) GetTarget(ctx context.Context, spec *executioncluster.Executi if spec != nil && !(spec.TargetID == "" || spec.TargetID == defaultInClusterTargetID) { return nil, errors.New(fmt.Sprintf("remote target %s is not supported", spec.TargetID)) } - if spec != nil && spec.ExecutionClusterLabel != nil && spec.ExecutionClusterLabel.Value != "" { - return nil, errors.New(fmt.Sprintf("execution cluster label %s is not supported", spec.ExecutionClusterLabel.Value)) + if spec != nil && spec.ExecutionClusterLabel != nil && spec.ExecutionClusterLabel.GetValue() != "" { + return nil, errors.New(fmt.Sprintf("execution cluster label %s is not supported", spec.ExecutionClusterLabel.GetValue())) } return &i.target, nil } diff --git a/flyteadmin/pkg/executioncluster/impl/random_cluster_selector.go b/flyteadmin/pkg/executioncluster/impl/random_cluster_selector.go index 35340d3822..e4c2149220 100644 --- 
a/flyteadmin/pkg/executioncluster/impl/random_cluster_selector.go +++ b/flyteadmin/pkg/executioncluster/impl/random_cluster_selector.go @@ -34,7 +34,7 @@ func getRandSource(seed string) (rand.Source, error) { if err != nil { return nil, err } - hashedSeed := int64(h.Sum64()) + hashedSeed := int64(h.Sum64()) // #nosec G115 return rand.NewSource(hashedSeed), nil } @@ -98,8 +98,8 @@ func (s RandomClusterSelector) GetTarget(ctx context.Context, spec *executionclu var label string - if spec.ExecutionClusterLabel != nil && spec.ExecutionClusterLabel.Value != "" { - label = spec.ExecutionClusterLabel.Value + if spec.ExecutionClusterLabel != nil && spec.ExecutionClusterLabel.GetValue() != "" { + label = spec.ExecutionClusterLabel.GetValue() logger.Debugf(ctx, "Using execution cluster label %s", label) } else { resource, err := s.resourceManager.GetResource(ctx, managerInterfaces.ResourceRequest{ @@ -113,7 +113,7 @@ func (s RandomClusterSelector) GetTarget(ctx context.Context, spec *executionclu return nil, err } if resource != nil && resource.Attributes.GetExecutionClusterLabel() != nil { - label = resource.Attributes.GetExecutionClusterLabel().Value + label = resource.Attributes.GetExecutionClusterLabel().GetValue() } } diff --git a/flyteadmin/pkg/manager/impl/description_entity_manager.go b/flyteadmin/pkg/manager/impl/description_entity_manager.go index a7affd5e88..7a2fdd239c 100644 --- a/flyteadmin/pkg/manager/impl/description_entity_manager.go +++ b/flyteadmin/pkg/manager/impl/description_entity_manager.go @@ -38,8 +38,8 @@ func (d *DescriptionEntityManager) GetDescriptionEntity(ctx context.Context, req logger.Errorf(ctx, "invalid request [%+v]: %v", request, err) return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) - return util.GetDescriptionEntity(ctx, d.db, request.Id) + ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain()) + return util.GetDescriptionEntity(ctx, d.db, request.GetId()) } func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, request *admin.DescriptionEntityListRequest) (*admin.DescriptionEntityList, error) { @@ -47,44 +47,44 @@ func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, re if err := validation.ValidateDescriptionEntityListRequest(request); err != nil { return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain()) - if request.ResourceType == core.ResourceType_WORKFLOW { - ctx = contextutils.WithWorkflowID(ctx, request.Id.Name) + if request.GetResourceType() == core.ResourceType_WORKFLOW { + ctx = contextutils.WithWorkflowID(ctx, request.GetId().GetName()) } else { - ctx = contextutils.WithTaskID(ctx, request.Id.Name) + ctx = contextutils.WithTaskID(ctx, request.GetId().GetName()) } filters, err := util.GetDbFilters(util.FilterSpec{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, - RequestFilters: request.Filters, - }, common.ResourceTypeToEntity[request.ResourceType]) + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), + RequestFilters: request.GetFilters(), + }, common.ResourceTypeToEntity[request.GetResourceType()]) if err != nil { logger.Error(ctx, "failed to get database filter") return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, 
models.DescriptionEntityColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.DescriptionEntityColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListWorkflows", request.Token) + "invalid pagination token %s for ListWorkflows", request.GetToken()) } listDescriptionEntitiesInput := repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, } output, err := d.db.DescriptionEntityRepo().List(ctx, listDescriptionEntitiesInput) if err != nil { - logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.Id, err) + logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.GetId(), err) return nil, err } descriptionEntityList, err := transformers.FromDescriptionEntityModels(output.Entities) @@ -94,7 +94,7 @@ func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, re return nil, err } var token string - if len(output.Entities) == int(request.Limit) { + if len(output.Entities) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(output.Entities)) } return &admin.DescriptionEntityList{ diff --git a/flyteadmin/pkg/manager/impl/execution_manager.go b/flyteadmin/pkg/manager/impl/execution_manager.go index e700a744d8..fd8f0870f1 100644 --- a/flyteadmin/pkg/manager/impl/execution_manager.go +++ b/flyteadmin/pkg/manager/impl/execution_manager.go @@ -95,8 +95,8 @@ type ExecutionManager struct { } func getExecutionContext(ctx context.Context, id *core.WorkflowExecutionIdentifier) context.Context { - ctx = contextutils.WithExecutionID(ctx, id.Name) - return contextutils.WithProjectDomain(ctx, id.Project, id.Domain) + ctx = contextutils.WithExecutionID(ctx, id.GetName()) + return contextutils.WithProjectDomain(ctx, id.GetProject(), id.GetDomain()) } // Returns the unique string which identifies the authenticated end user (if any). 
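A practical effect of the recurring change in these hunks, replacing direct protobuf field access with the generated Get*() accessors, is nil-safety: protoc-gen-go emits getters with a nil-receiver check, so a chained read through an unset intermediate message returns the zero value instead of panicking. A minimal sketch of the difference (not part of the diff, and assuming the monorepo's flyteidl import path):

```go
package main

import (
	"fmt"

	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"
)

func main() {
	// A nil identifier, e.g. an optional field that was never populated.
	var id *core.NodeExecutionIdentifier

	// Direct field access would panic with a nil pointer dereference:
	// name := id.ExecutionId.Name

	// The generated getters check the receiver for nil and return zero values,
	// so this chained read is safe and yields "".
	name := id.GetExecutionId().GetName()
	fmt.Printf("name=%q\n", name)
}
```

Writes are unaffected: the generated Go structs have no setter methods, which is why assignments such as requestSpec.Metadata.Principal = getUser(ctx) below still go through the exported fields while reads move to getters.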
@@ -108,16 +108,16 @@ func getUser(ctx context.Context) string { func (m *ExecutionManager) populateExecutionQueue( ctx context.Context, identifier *core.Identifier, compiledWorkflow *core.CompiledWorkflowClosure) { queueConfig := m.queueAllocator.GetQueue(ctx, identifier) - for _, task := range compiledWorkflow.Tasks { - container := task.Template.GetContainer() + for _, task := range compiledWorkflow.GetTasks() { + container := task.GetTemplate().GetContainer() if container == nil { // Unrecognized target type, nothing to do continue } if queueConfig.DynamicQueue != "" { - logger.Debugf(ctx, "Assigning %s as child queue for task %+v", queueConfig.DynamicQueue, task.Template.Id) - container.Config = append(container.Config, &core.KeyValuePair{ + logger.Debugf(ctx, "Assigning %s as child queue for task %+v", queueConfig.DynamicQueue, task.GetTemplate().GetId()) + container.Config = append(container.GetConfig(), &core.KeyValuePair{ Key: childContainerQueueKey, Value: queueConfig.DynamicQueue, }) @@ -159,8 +159,8 @@ func resolveStringMap(preferredValues, defaultValues mapWithValues, valueName st func (m *ExecutionManager) addPluginOverrides(ctx context.Context, executionID *core.WorkflowExecutionIdentifier, workflowName, launchPlanName string) ([]*admin.PluginOverride, error) { override, err := m.resourceManager.GetResource(ctx, interfaces.ResourceRequest{ - Project: executionID.Project, - Domain: executionID.Domain, + Project: executionID.GetProject(), + Domain: executionID.GetDomain(), Workflow: workflowName, LaunchPlan: launchPlanName, ResourceType: admin.MatchableResource_PLUGIN_OVERRIDE, @@ -169,7 +169,7 @@ func (m *ExecutionManager) addPluginOverrides(ctx context.Context, executionID * return nil, err } if override != nil && override.Attributes != nil && override.Attributes.GetPluginOverrides() != nil { - return override.Attributes.GetPluginOverrides().Overrides, nil + return override.Attributes.GetPluginOverrides().GetOverrides(), nil } return nil, nil } @@ -188,13 +188,13 @@ func (m *ExecutionManager) setCompiledTaskDefaults(ctx context.Context, task *co return } - if task.Template == nil || task.Template.GetContainer() == nil { + if task.GetTemplate() == nil || task.GetTemplate().GetContainer() == nil { // Nothing to do logger.Debugf(ctx, "Not setting default resources for task [%+v], no container resources found to check", task) return } - if task.Template.GetContainer().Resources == nil { + if task.GetTemplate().GetContainer().GetResources() == nil { // In case of no resources on the container, create empty requests and limits // so the container will still have resources configure properly task.Template.GetContainer().Resources = &core.Resources{ @@ -209,7 +209,7 @@ func (m *ExecutionManager) setCompiledTaskDefaults(ctx context.Context, task *co // The IDL representation for container-type tasks represents resources as a list with string quantities. // In order to easily reason about them we convert them to a set where we can O(1) fetch specific resources (e.g. CPU) // and represent them as comparable quantities rather than strings. 
- taskResourceRequirements := util.GetCompleteTaskResourceRequirements(ctx, task.Template.Id, task) + taskResourceRequirements := util.GetCompleteTaskResourceRequirements(ctx, task.GetTemplate().GetId(), task) cpu := flytek8s.AdjustOrDefaultResource(taskResourceRequirements.Defaults.CPU, taskResourceRequirements.Limits.CPU, platformTaskResources.Defaults.CPU, platformTaskResources.Limits.CPU) @@ -276,22 +276,22 @@ func (m *ExecutionManager) setCompiledTaskDefaults(ctx context.Context, task *co // as well as sets request spec metadata with the inherited principal and adjusted nesting data. func (m *ExecutionManager) getInheritedExecMetadata(ctx context.Context, requestSpec *admin.ExecutionSpec, workflowExecutionID *core.WorkflowExecutionIdentifier) (parentNodeExecutionID uint, sourceExecutionID uint, err error) { - if requestSpec.Metadata == nil || requestSpec.Metadata.ParentNodeExecution == nil { + if requestSpec.GetMetadata() == nil || requestSpec.GetMetadata().GetParentNodeExecution() == nil { return parentNodeExecutionID, sourceExecutionID, nil } - parentNodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, requestSpec.Metadata.ParentNodeExecution) + parentNodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, requestSpec.GetMetadata().GetParentNodeExecution()) if err != nil { logger.Errorf(ctx, "Failed to get node execution [%+v] that launched this execution [%+v] with error %v", - requestSpec.Metadata.ParentNodeExecution, workflowExecutionID, err) + requestSpec.GetMetadata().GetParentNodeExecution(), workflowExecutionID, err) return parentNodeExecutionID, sourceExecutionID, err } parentNodeExecutionID = parentNodeExecutionModel.ID - sourceExecutionModel, err := util.GetExecutionModel(ctx, m.db, requestSpec.Metadata.ParentNodeExecution.ExecutionId) + sourceExecutionModel, err := util.GetExecutionModel(ctx, m.db, requestSpec.GetMetadata().GetParentNodeExecution().GetExecutionId()) if err != nil { logger.Errorf(ctx, "Failed to get workflow execution [%+v] that launched this execution [%+v] with error %v", - requestSpec.Metadata.ParentNodeExecution, workflowExecutionID, err) + requestSpec.GetMetadata().GetParentNodeExecution(), workflowExecutionID, err) return parentNodeExecutionID, sourceExecutionID, err } sourceExecutionID = sourceExecutionModel.ID @@ -301,16 +301,16 @@ func (m *ExecutionManager) getInheritedExecMetadata(ctx context.Context, request logger.Errorf(ctx, "Failed transform parent execution model for child execution [%+v] with err: %v", workflowExecutionID, err) return parentNodeExecutionID, sourceExecutionID, err } - if sourceExecution.Spec.Metadata != nil { - requestSpec.Metadata.Nesting = sourceExecution.Spec.Metadata.Nesting + 1 + if sourceExecution.GetSpec().GetMetadata() != nil { + requestSpec.Metadata.Nesting = sourceExecution.GetSpec().GetMetadata().GetNesting() + 1 } else { requestSpec.Metadata.Nesting = 1 } // If the source execution has a cluster label, inherit it. 
- if sourceExecution.Spec.ExecutionClusterLabel != nil { - logger.Infof(ctx, "Inherited execution label from source execution [%+v]", sourceExecution.Spec.ExecutionClusterLabel.Value) - requestSpec.ExecutionClusterLabel = sourceExecution.Spec.ExecutionClusterLabel + if sourceExecution.GetSpec().GetExecutionClusterLabel() != nil { + logger.Infof(ctx, "Inherited execution label from source execution [%+v]", sourceExecution.GetSpec().GetExecutionClusterLabel().GetValue()) + requestSpec.ExecutionClusterLabel = sourceExecution.GetSpec().GetExecutionClusterLabel() } return parentNodeExecutionID, sourceExecutionID, nil } @@ -324,20 +324,20 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi workflowExecConfig := &admin.WorkflowExecutionConfig{} // Merge the request spec into workflowExecConfig - workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, request.Spec) + workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, request.GetSpec()) var workflowName string - if launchPlan != nil && launchPlan.Spec != nil { + if launchPlan != nil && launchPlan.GetSpec() != nil { // Merge the launch plan spec into workflowExecConfig - workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, launchPlan.Spec) - if launchPlan.Spec.WorkflowId != nil { - workflowName = launchPlan.Spec.WorkflowId.Name + workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, launchPlan.GetSpec()) + if launchPlan.GetSpec().GetWorkflowId() != nil { + workflowName = launchPlan.GetSpec().GetWorkflowId().GetName() } } // This will get the most specific Workflow Execution Config. matchableResource, err := util.GetMatchableResource(ctx, m.resourceManager, - admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, request.Project, request.Domain, workflowName) + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, request.GetProject(), request.GetDomain(), workflowName) if err != nil { return nil, err } @@ -356,7 +356,7 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi // system level defaults for the rest. // See FLYTE-2322 for more background information. projectMatchableResource, err := util.GetMatchableResource(ctx, m.resourceManager, - admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, request.Project, "", "") + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, request.GetProject(), "", "") if err != nil { return nil, err } @@ -404,7 +404,7 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi } func (m *ExecutionManager) getClusterAssignment(ctx context.Context, req *admin.ExecutionCreateRequest) (*admin.ClusterAssignment, error) { - storedAssignment, err := m.fetchClusterAssignment(ctx, req.Project, req.Domain) + storedAssignment, err := m.fetchClusterAssignment(ctx, req.GetProject(), req.GetDomain()) if err != nil { return nil, err } @@ -421,7 +421,7 @@ func (m *ExecutionManager) getClusterAssignment(ctx context.Context, req *admin. 
} if reqPool != storedPool { - return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "execution with project %q and domain %q cannot run on cluster pool %q, because its configured to run on pool %q", req.Project, req.Domain, reqPool, storedPool) + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "execution with project %q and domain %q cannot run on cluster pool %q, because its configured to run on pool %q", req.GetProject(), req.GetDomain(), reqPool, storedPool) } return storedAssignment, nil @@ -454,10 +454,10 @@ func (m *ExecutionManager) launchSingleTaskExecution( context.Context, *models.Execution, error) { taskModel, err := m.db.TaskRepo().Get(ctx, repositoryInterfaces.Identifier{ - Project: request.Spec.LaunchPlan.Project, - Domain: request.Spec.LaunchPlan.Domain, - Name: request.Spec.LaunchPlan.Name, - Version: request.Spec.LaunchPlan.Version, + Project: request.GetSpec().GetLaunchPlan().GetProject(), + Domain: request.GetSpec().GetLaunchPlan().GetDomain(), + Name: request.GetSpec().GetLaunchPlan().GetName(), + Version: request.GetSpec().GetLaunchPlan().GetVersion(), }) if err != nil { return nil, nil, err @@ -468,7 +468,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( } // Prepare a skeleton workflow and launch plan - taskIdentifier := request.Spec.LaunchPlan + taskIdentifier := request.GetSpec().GetLaunchPlan() workflowModel, err := util.CreateOrGetWorkflowModel(ctx, request, m.db, m.workflowManager, m.namedEntityManager, taskIdentifier, &task) if err != nil { @@ -481,27 +481,27 @@ func (m *ExecutionManager) launchSingleTaskExecution( } launchPlan, err := util.CreateOrGetLaunchPlan(ctx, m.db, m.config, m.namedEntityManager, taskIdentifier, - workflow.Closure.CompiledWorkflow.Primary.Template.Interface, workflowModel.ID, request.Spec) + workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface(), workflowModel.ID, request.GetSpec()) if err != nil { return nil, nil, err } executionInputs, err := validation.CheckAndFetchInputsForExecution( - request.Inputs, - launchPlan.Spec.FixedInputs, - launchPlan.Closure.ExpectedInputs, + request.GetInputs(), + launchPlan.GetSpec().GetFixedInputs(), + launchPlan.GetClosure().GetExpectedInputs(), ) if err != nil { logger.Debugf(ctx, "Failed to CheckAndFetchInputsForExecution with request.Inputs: %+v"+ "fixed inputs: %+v and expected inputs: %+v with err %v", - request.Inputs, launchPlan.Spec.FixedInputs, launchPlan.Closure.ExpectedInputs, err) + request.GetInputs(), launchPlan.GetSpec().GetFixedInputs(), launchPlan.GetClosure().GetExpectedInputs(), err) return nil, nil, err } name := util.GetExecutionName(request) workflowExecutionID := &core.WorkflowExecutionIdentifier{ - Project: request.Project, - Domain: request.Domain, + Project: request.GetProject(), + Domain: request.GetDomain(), Name: name, } @@ -519,15 +519,15 @@ func (m *ExecutionManager) launchSingleTaskExecution( offloadInputsGroup.Go(func() error { var err error inputsURI, err = common.OffloadLiteralMap(offloadInputsGroupCtx, m.storageClient, executionInputs, // or request.Inputs? 
- workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs) + workflowExecutionID.GetProject(), workflowExecutionID.GetDomain(), workflowExecutionID.GetName(), shared.Inputs) return err }) var userInputsURI storage.DataReference offloadInputsGroup.Go(func() error { var err error - userInputsURI, err = common.OffloadLiteralMap(offloadInputsGroupCtx, m.storageClient, request.Inputs, - workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.UserInputs) + userInputsURI, err = common.OffloadLiteralMap(offloadInputsGroupCtx, m.storageClient, request.GetInputs(), + workflowExecutionID.GetProject(), workflowExecutionID.GetDomain(), workflowExecutionID.GetName(), shared.UserInputs) return err }) @@ -535,15 +535,15 @@ func (m *ExecutionManager) launchSingleTaskExecution( if err != nil { return nil, nil, err } - closure.CreatedAt = workflow.Closure.CreatedAt + closure.CreatedAt = workflow.GetClosure().GetCreatedAt() workflow.Closure = closure ctx = getExecutionContext(ctx, workflowExecutionID) namespace := common.GetNamespaceName( - m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.Project, workflowExecutionID.Domain) + m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.GetProject(), workflowExecutionID.GetDomain()) - requestSpec := request.Spec - if requestSpec.Metadata == nil { + requestSpec := request.GetSpec() + if requestSpec.GetMetadata() == nil { requestSpec.Metadata = &admin.ExecutionMetadata{} } requestSpec.Metadata.Principal = getUser(ctx) @@ -557,13 +557,13 @@ func (m *ExecutionManager) launchSingleTaskExecution( } // Dynamically assign task resource defaults. - platformTaskResources := util.GetTaskResources(ctx, workflow.Id, m.resourceManager, m.config.TaskResourceConfiguration()) - for _, t := range workflow.Closure.CompiledWorkflow.Tasks { + platformTaskResources := util.GetTaskResources(ctx, workflow.GetId(), m.resourceManager, m.config.TaskResourceConfiguration()) + for _, t := range workflow.GetClosure().GetCompiledWorkflow().GetTasks() { m.setCompiledTaskDefaults(ctx, t, platformTaskResources) } // Dynamically assign execution queues. 
- m.populateExecutionQueue(ctx, workflow.Id, workflow.Closure.CompiledWorkflow) + m.populateExecutionQueue(ctx, workflow.GetId(), workflow.GetClosure().GetCompiledWorkflow()) executionConfig, err := m.getExecutionConfig(ctx, request, nil) if err != nil { @@ -571,23 +571,23 @@ func (m *ExecutionManager) launchSingleTaskExecution( } var labels map[string]string - if executionConfig.Labels != nil { - labels = executionConfig.Labels.Values + if executionConfig.GetLabels() != nil { + labels = executionConfig.GetLabels().GetValues() } - labels, err = m.addProjectLabels(ctx, request.Project, labels) + labels, err = m.addProjectLabels(ctx, request.GetProject(), labels) if err != nil { return nil, nil, err } var annotations map[string]string - if executionConfig.Annotations != nil { - annotations = executionConfig.Annotations.Values + if executionConfig.GetAnnotations() != nil { + annotations = executionConfig.GetAnnotations().GetValues() } var rawOutputDataConfig *admin.RawOutputDataConfig - if executionConfig.RawOutputDataConfig != nil { - rawOutputDataConfig = executionConfig.RawOutputDataConfig + if executionConfig.GetRawOutputDataConfig() != nil { + rawOutputDataConfig = executionConfig.GetRawOutputDataConfig() } clusterAssignment, err := m.getClusterAssignment(ctx, request) @@ -596,8 +596,8 @@ func (m *ExecutionManager) launchSingleTaskExecution( } var executionClusterLabel *admin.ExecutionClusterLabel - if requestSpec.ExecutionClusterLabel != nil { - executionClusterLabel = requestSpec.ExecutionClusterLabel + if requestSpec.GetExecutionClusterLabel() != nil { + executionClusterLabel = requestSpec.GetExecutionClusterLabel() } executionParameters := workflowengineInterfaces.ExecutionParameters{ Inputs: executionInputs, @@ -613,16 +613,16 @@ func (m *ExecutionManager) launchSingleTaskExecution( ExecutionClusterLabel: executionClusterLabel, } - overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, workflowExecutionID.Name, "") + overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, workflowExecutionID.GetName(), "") if err != nil { return nil, nil, err } if overrides != nil { executionParameters.TaskPluginOverrides = overrides } - if request.Spec.Metadata != nil && request.Spec.Metadata.ReferenceExecution != nil && - request.Spec.Metadata.Mode == admin.ExecutionMetadata_RECOVERED { - executionParameters.RecoveryExecution = request.Spec.Metadata.ReferenceExecution + if request.GetSpec().GetMetadata() != nil && request.GetSpec().GetMetadata().GetReferenceExecution() != nil && + request.GetSpec().GetMetadata().GetMode() == admin.ExecutionMetadata_RECOVERED { + executionParameters.RecoveryExecution = request.GetSpec().GetMetadata().GetReferenceExecution() } err = offloadInputsGroup.Wait() @@ -634,9 +634,9 @@ func (m *ExecutionManager) launchSingleTaskExecution( execInfo, err := workflowExecutor.Execute(ctx, workflowengineInterfaces.ExecutionData{ Namespace: namespace, ExecutionID: workflowExecutionID, - ReferenceWorkflowName: workflow.Id.Name, - ReferenceLaunchPlanName: launchPlan.Id.Name, - WorkflowClosure: workflow.Closure.CompiledWorkflow, + ReferenceWorkflowName: workflow.GetId().GetName(), + ReferenceLaunchPlanName: launchPlan.GetId().GetName(), + WorkflowClosure: workflow.GetClosure().GetCompiledWorkflow(), WorkflowClosureReference: storage.DataReference(workflowModel.RemoteClosureIdentifier), ExecutionParameters: executionParameters, OffloadedInputsReference: inputsURI, @@ -645,7 +645,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( if err != nil { 
m.systemMetrics.PropellerFailures.Inc() logger.Infof(ctx, "Failed to execute workflow %+v with execution id %+v and inputs %+v with err %v", - request, &workflowExecutionID, request.Inputs, err) + request, &workflowExecutionID, request.GetInputs(), err) return nil, nil, err } executionCreatedAt := time.Now() @@ -655,13 +655,13 @@ func (m *ExecutionManager) launchSingleTaskExecution( // Request notification settings takes precedence over the launch plan settings. // If there is no notification in the request and DisableAll is not true, use the settings from the launch plan. var notificationsSettings []*admin.Notification - if launchPlan.Spec.GetEntityMetadata() != nil { - notificationsSettings = launchPlan.Spec.EntityMetadata.GetNotifications() + if launchPlan.GetSpec().GetEntityMetadata() != nil { + notificationsSettings = launchPlan.GetSpec().GetEntityMetadata().GetNotifications() } - if request.Spec.GetNotifications() != nil && request.Spec.GetNotifications().Notifications != nil && - len(request.Spec.GetNotifications().Notifications) > 0 { - notificationsSettings = request.Spec.GetNotifications().Notifications - } else if request.Spec.GetDisableAll() { + if request.GetSpec().GetNotifications() != nil && request.GetSpec().GetNotifications().GetNotifications() != nil && + len(request.GetSpec().GetNotifications().GetNotifications()) > 0 { + notificationsSettings = request.GetSpec().GetNotifications().GetNotifications() + } else if request.GetSpec().GetDisableAll() { notificationsSettings = make([]*admin.Notification, 0) } @@ -673,14 +673,14 @@ func (m *ExecutionManager) launchSingleTaskExecution( // The execution is not considered running until the propeller sends a specific event saying so. CreatedAt: m._clock.Now(), Notifications: notificationsSettings, - WorkflowIdentifier: workflow.Id, + WorkflowIdentifier: workflow.GetId(), ParentNodeExecutionID: parentNodeExecutionID, SourceExecutionID: sourceExecutionID, Cluster: execInfo.Cluster, InputsURI: inputsURI, UserInputsURI: userInputsURI, - SecurityContext: executionConfig.SecurityContext, - LaunchEntity: taskIdentifier.ResourceType, + SecurityContext: executionConfig.GetSecurityContext(), + LaunchEntity: taskIdentifier.GetResourceType(), Namespace: namespace, }) if err != nil { @@ -688,27 +688,27 @@ func (m *ExecutionManager) launchSingleTaskExecution( workflowExecutionID, err) return nil, nil, err } - m.userMetrics.WorkflowExecutionInputBytes.Observe(float64(proto.Size(request.Inputs))) + m.userMetrics.WorkflowExecutionInputBytes.Observe(float64(proto.Size(request.GetInputs()))) return ctx, executionModel, nil } func resolveAuthRole(request *admin.ExecutionCreateRequest, launchPlan *admin.LaunchPlan) *admin.AuthRole { - if request.Spec.AuthRole != nil { - return request.Spec.AuthRole + if request.GetSpec().GetAuthRole() != nil { + return request.GetSpec().GetAuthRole() } - if launchPlan == nil || launchPlan.Spec == nil { + if launchPlan == nil || launchPlan.GetSpec() == nil { return &admin.AuthRole{} } // Set role permissions based on launch plan Auth values. 
// The branched-ness of this check is due to the presence numerous deprecated fields - if launchPlan.Spec.GetAuthRole() != nil { - return launchPlan.Spec.GetAuthRole() + if launchPlan.GetSpec().GetAuthRole() != nil { + return launchPlan.GetSpec().GetAuthRole() } else if launchPlan.GetSpec().GetAuth() != nil { return &admin.AuthRole{ - AssumableIamRole: launchPlan.GetSpec().GetAuth().AssumableIamRole, - KubernetesServiceAccount: launchPlan.GetSpec().GetAuth().KubernetesServiceAccount, + AssumableIamRole: launchPlan.GetSpec().GetAuth().GetAssumableIamRole(), + KubernetesServiceAccount: launchPlan.GetSpec().GetAuth().GetKubernetesServiceAccount(), } } else if len(launchPlan.GetSpec().GetRole()) > 0 { return &admin.AuthRole{ @@ -722,17 +722,17 @@ func resolveAuthRole(request *admin.ExecutionCreateRequest, launchPlan *admin.La func resolveSecurityCtx(ctx context.Context, executionConfigSecurityCtx *core.SecurityContext, resolvedAuthRole *admin.AuthRole) *core.SecurityContext { // Use security context from the executionConfigSecurityCtx if its set and non empty or else resolve from authRole - if executionConfigSecurityCtx != nil && executionConfigSecurityCtx.RunAs != nil && - (len(executionConfigSecurityCtx.RunAs.K8SServiceAccount) > 0 || - len(executionConfigSecurityCtx.RunAs.IamRole) > 0 || - len(executionConfigSecurityCtx.RunAs.ExecutionIdentity) > 0) { + if executionConfigSecurityCtx != nil && executionConfigSecurityCtx.GetRunAs() != nil && + (len(executionConfigSecurityCtx.GetRunAs().GetK8SServiceAccount()) > 0 || + len(executionConfigSecurityCtx.GetRunAs().GetIamRole()) > 0 || + len(executionConfigSecurityCtx.GetRunAs().GetExecutionIdentity()) > 0) { return executionConfigSecurityCtx } logger.Warn(ctx, "Setting security context from auth Role") return &core.SecurityContext{ RunAs: &core.Identity{ - IamRole: resolvedAuthRole.AssumableIamRole, - K8SServiceAccount: resolvedAuthRole.KubernetesServiceAccount, + IamRole: resolvedAuthRole.GetAssumableIamRole(), + K8SServiceAccount: resolvedAuthRole.GetKubernetesServiceAccount(), }, } } @@ -755,7 +755,7 @@ func (m *ExecutionManager) getStringFromInput(ctx context.Context, inputBinding case *core.Primitive_Integer: strVal = p.GetStringValue() case *core.Primitive_Datetime: - t := time.Unix(p.GetDatetime().Seconds, int64(p.GetDatetime().Nanos)) + t := time.Unix(p.GetDatetime().GetSeconds(), int64(p.GetDatetime().GetNanos())) t = t.In(time.UTC) strVal = t.Format("2006-01-02") case *core.Primitive_StringValue: @@ -812,7 +812,7 @@ func (m *ExecutionManager) fillInTemplateArgs(ctx context.Context, query *core.A var partitions map[string]*core.LabelValue if artifactID.GetPartitions().GetValue() != nil { - partitions = make(map[string]*core.LabelValue, len(artifactID.GetPartitions().Value)) + partitions = make(map[string]*core.LabelValue, len(artifactID.GetPartitions().GetValue())) for k, v := range artifactID.GetPartitions().GetValue() { newValue, err := m.getLabelValue(ctx, v, inputs) if err != nil { @@ -825,20 +825,20 @@ func (m *ExecutionManager) fillInTemplateArgs(ctx context.Context, query *core.A var timePartition *core.TimePartition if artifactID.GetTimePartition().GetValue() != nil { - if artifactID.GetTimePartition().Value.GetTimeValue() != nil { + if artifactID.GetTimePartition().GetValue().GetTimeValue() != nil { // If the time value is set, then just pass it through, nothing to fill in. 
timePartition = artifactID.GetTimePartition() - } else if artifactID.GetTimePartition().Value.GetInputBinding() != nil { + } else if artifactID.GetTimePartition().GetValue().GetInputBinding() != nil { // Evaluate the time partition input binding - lit, ok := inputs[artifactID.GetTimePartition().Value.GetInputBinding().GetVar()] + lit, ok := inputs[artifactID.GetTimePartition().GetValue().GetInputBinding().GetVar()] if !ok { - return query, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "time partition input binding var [%s] not found in inputs %v", artifactID.GetTimePartition().Value.GetInputBinding().GetVar(), inputs) + return query, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "time partition input binding var [%s] not found in inputs %v", artifactID.GetTimePartition().GetValue().GetInputBinding().GetVar(), inputs) } if lit.GetScalar().GetPrimitive().GetDatetime() == nil { return query, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "time partition binding to input var [%s] failing because %v is not a datetime", - artifactID.GetTimePartition().Value.GetInputBinding().GetVar(), lit) + artifactID.GetTimePartition().GetValue().GetInputBinding().GetVar(), lit) } timePartition = &core.TimePartition{ Value: &core.LabelValue{ @@ -881,8 +881,8 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( return nil, nil, nil, err } - if request.Spec.LaunchPlan.ResourceType == core.ResourceType_TASK { - logger.Debugf(ctx, "Launching single task execution with [%+v]", request.Spec.LaunchPlan) + if request.GetSpec().GetLaunchPlan().GetResourceType() == core.ResourceType_TASK { + logger.Debugf(ctx, "Launching single task execution with [%+v]", request.GetSpec().GetLaunchPlan()) // When tasks can have defaults this will need to handle Artifacts as well. ctx, model, err := m.launchSingleTaskExecution(ctx, request, requestedAt) return ctx, model, nil, err @@ -892,7 +892,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( func (m *ExecutionManager) launchExecution( ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) (context.Context, *models.Execution, []*models.ExecutionTag, error) { - launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Spec.LaunchPlan) + launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.GetSpec().GetLaunchPlan()) if err != nil { logger.Debugf(ctx, "Failed to get launch plan model for ExecutionCreateRequest %+v with err %v", request, err) return nil, nil, nil, err @@ -905,38 +905,38 @@ func (m *ExecutionManager) launchExecution( var lpExpectedInputs *core.ParameterMap var usedArtifactIDs []*core.ArtifactID - lpExpectedInputs = launchPlan.Closure.ExpectedInputs + lpExpectedInputs = launchPlan.GetClosure().GetExpectedInputs() // Artifacts retrieved will need to be stored somewhere to ensure that we can re-emit events if necessary // in the future, and also to make sure that relaunch and recover can use it if necessary. 
executionInputs, err := validation.CheckAndFetchInputsForExecution( - request.Inputs, - launchPlan.Spec.FixedInputs, + request.GetInputs(), + launchPlan.GetSpec().GetFixedInputs(), lpExpectedInputs, ) if err != nil { logger.Debugf(ctx, "Failed to CheckAndFetchInputsForExecution with request.Inputs: %+v"+ "fixed inputs: %+v and expected inputs: %+v with err %v", - request.Inputs, launchPlan.Spec.FixedInputs, lpExpectedInputs, err) + request.GetInputs(), launchPlan.GetSpec().GetFixedInputs(), lpExpectedInputs, err) return nil, nil, nil, err } - workflowModel, err := util.GetWorkflowModel(ctx, m.db, launchPlan.Spec.WorkflowId) + workflowModel, err := util.GetWorkflowModel(ctx, m.db, launchPlan.GetSpec().GetWorkflowId()) if err != nil { - logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err) + logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.GetSpec().GetWorkflowId(), err) return nil, nil, nil, err } workflow, err := transformers.FromWorkflowModel(workflowModel) if err != nil { - logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err) + logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.GetSpec().GetWorkflowId(), err) return nil, nil, nil, err } name := util.GetExecutionName(request) workflowExecutionID := &core.WorkflowExecutionIdentifier{ - Project: request.Project, - Domain: request.Domain, + Project: request.GetProject(), + Domain: request.GetDomain(), Name: name, } @@ -947,7 +947,7 @@ func (m *ExecutionManager) launchExecution( var err error closure, err = util.FetchAndGetWorkflowClosure(groupCtx, m.storageClient, workflowModel.RemoteClosureIdentifier) if err != nil { - logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err) + logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.GetSpec().GetWorkflowId(), err) } return err }) @@ -956,15 +956,15 @@ func (m *ExecutionManager) launchExecution( group.Go(func() error { var err error inputsURI, err = common.OffloadLiteralMap(groupCtx, m.storageClient, executionInputs, - workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs) + workflowExecutionID.GetProject(), workflowExecutionID.GetDomain(), workflowExecutionID.GetName(), shared.Inputs) return err }) var userInputsURI storage.DataReference group.Go(func() error { var err error - userInputsURI, err = common.OffloadLiteralMap(groupCtx, m.storageClient, request.Inputs, - workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.UserInputs) + userInputsURI, err = common.OffloadLiteralMap(groupCtx, m.storageClient, request.GetInputs(), + workflowExecutionID.GetProject(), workflowExecutionID.GetDomain(), workflowExecutionID.GetName(), shared.UserInputs) return err }) @@ -972,12 +972,12 @@ func (m *ExecutionManager) launchExecution( if err != nil { return nil, nil, nil, err } - closure.CreatedAt = workflow.Closure.CreatedAt + closure.CreatedAt = workflow.GetClosure().GetCreatedAt() workflow.Closure = closure ctx = getExecutionContext(ctx, workflowExecutionID) - var requestSpec = request.Spec - if requestSpec.Metadata == nil { + var requestSpec = request.GetSpec() + if requestSpec.GetMetadata() == nil { requestSpec.Metadata = &admin.ExecutionMetadata{} } requestSpec.Metadata.Principal = getUser(ctx) @@ -992,13 +992,13 @@ func (m *ExecutionManager) launchExecution( } // Dynamically assign task resource 
defaults. - platformTaskResources := util.GetTaskResources(ctx, workflow.Id, m.resourceManager, m.config.TaskResourceConfiguration()) - for _, task := range workflow.Closure.CompiledWorkflow.Tasks { + platformTaskResources := util.GetTaskResources(ctx, workflow.GetId(), m.resourceManager, m.config.TaskResourceConfiguration()) + for _, task := range workflow.GetClosure().GetCompiledWorkflow().GetTasks() { m.setCompiledTaskDefaults(ctx, task, platformTaskResources) } // Dynamically assign execution queues. - m.populateExecutionQueue(ctx, workflow.Id, workflow.Closure.CompiledWorkflow) + m.populateExecutionQueue(ctx, workflow.GetId(), workflow.GetClosure().GetCompiledWorkflow()) executionConfig, err := m.getExecutionConfig(ctx, request, launchPlan) if err != nil { @@ -1006,23 +1006,23 @@ func (m *ExecutionManager) launchExecution( } namespace := common.GetNamespaceName( - m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.Project, workflowExecutionID.Domain) + m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.GetProject(), workflowExecutionID.GetDomain()) - labels, err := resolveStringMap(executionConfig.GetLabels(), launchPlan.Spec.Labels, "labels", m.config.RegistrationValidationConfiguration().GetMaxLabelEntries()) + labels, err := resolveStringMap(executionConfig.GetLabels(), launchPlan.GetSpec().GetLabels(), "labels", m.config.RegistrationValidationConfiguration().GetMaxLabelEntries()) if err != nil { return nil, nil, nil, err } - labels, err = m.addProjectLabels(ctx, request.Project, labels) + labels, err = m.addProjectLabels(ctx, request.GetProject(), labels) if err != nil { return nil, nil, nil, err } - annotations, err := resolveStringMap(executionConfig.GetAnnotations(), launchPlan.Spec.Annotations, "annotations", m.config.RegistrationValidationConfiguration().GetMaxAnnotationEntries()) + annotations, err := resolveStringMap(executionConfig.GetAnnotations(), launchPlan.GetSpec().GetAnnotations(), "annotations", m.config.RegistrationValidationConfiguration().GetMaxAnnotationEntries()) if err != nil { return nil, nil, nil, err } var rawOutputDataConfig *admin.RawOutputDataConfig - if executionConfig.RawOutputDataConfig != nil { - rawOutputDataConfig = executionConfig.RawOutputDataConfig + if executionConfig.GetRawOutputDataConfig() != nil { + rawOutputDataConfig = executionConfig.GetRawOutputDataConfig() } clusterAssignment, err := m.getClusterAssignment(ctx, request) @@ -1031,8 +1031,8 @@ func (m *ExecutionManager) launchExecution( } var executionClusterLabel *admin.ExecutionClusterLabel - if requestSpec.ExecutionClusterLabel != nil { - executionClusterLabel = requestSpec.ExecutionClusterLabel + if requestSpec.GetExecutionClusterLabel() != nil { + executionClusterLabel = requestSpec.GetExecutionClusterLabel() } executionParameters := workflowengineInterfaces.ExecutionParameters{ @@ -1049,7 +1049,7 @@ func (m *ExecutionManager) launchExecution( ExecutionClusterLabel: executionClusterLabel, } - overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, launchPlan.GetSpec().WorkflowId.Name, launchPlan.Id.Name) + overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, launchPlan.GetSpec().GetWorkflowId().GetName(), launchPlan.GetId().GetName()) if err != nil { return nil, nil, nil, err } @@ -1057,9 +1057,9 @@ func (m *ExecutionManager) launchExecution( executionParameters.TaskPluginOverrides = overrides } - if request.Spec.Metadata != nil && request.Spec.Metadata.ReferenceExecution != nil && - 
request.Spec.Metadata.Mode == admin.ExecutionMetadata_RECOVERED { - executionParameters.RecoveryExecution = request.Spec.Metadata.ReferenceExecution + if request.GetSpec().GetMetadata() != nil && request.GetSpec().GetMetadata().GetReferenceExecution() != nil && + request.GetSpec().GetMetadata().GetMode() == admin.ExecutionMetadata_RECOVERED { + executionParameters.RecoveryExecution = request.GetSpec().GetMetadata().GetReferenceExecution() } executionCreatedAt := time.Now() @@ -1068,12 +1068,12 @@ func (m *ExecutionManager) launchExecution( // Request notification settings takes precedence over the launch plan settings. // If there is no notification in the request and DisableAll is not true, use the settings from the launch plan. var notificationsSettings []*admin.Notification - if launchPlan.Spec.GetEntityMetadata() != nil { - notificationsSettings = launchPlan.Spec.EntityMetadata.GetNotifications() + if launchPlan.GetSpec().GetEntityMetadata() != nil { + notificationsSettings = launchPlan.GetSpec().GetEntityMetadata().GetNotifications() } - if requestSpec.GetNotifications() != nil && requestSpec.GetNotifications().Notifications != nil && - len(requestSpec.GetNotifications().Notifications) > 0 { - notificationsSettings = requestSpec.GetNotifications().Notifications + if requestSpec.GetNotifications() != nil && requestSpec.GetNotifications().GetNotifications() != nil && + len(requestSpec.GetNotifications().GetNotifications()) > 0 { + notificationsSettings = requestSpec.GetNotifications().GetNotifications() } else if requestSpec.GetDisableAll() { notificationsSettings = make([]*admin.Notification, 0) } @@ -1086,13 +1086,13 @@ func (m *ExecutionManager) launchExecution( // The execution is not considered running until the propeller sends a specific event saying so. CreatedAt: m._clock.Now(), Notifications: notificationsSettings, - WorkflowIdentifier: workflow.Id, + WorkflowIdentifier: workflow.GetId(), ParentNodeExecutionID: parentNodeExecutionID, SourceExecutionID: sourceExecutionID, InputsURI: inputsURI, UserInputsURI: userInputsURI, - SecurityContext: executionConfig.SecurityContext, - LaunchEntity: launchPlan.Id.ResourceType, + SecurityContext: executionConfig.GetSecurityContext(), + LaunchEntity: launchPlan.GetId().GetResourceType(), Namespace: namespace, } @@ -1100,9 +1100,9 @@ func (m *ExecutionManager) launchExecution( execInfo, execErr := workflowExecutor.Execute(ctx, workflowengineInterfaces.ExecutionData{ Namespace: namespace, ExecutionID: workflowExecutionID, - ReferenceWorkflowName: workflow.Id.Name, - ReferenceLaunchPlanName: launchPlan.Id.Name, - WorkflowClosure: workflow.Closure.CompiledWorkflow, + ReferenceWorkflowName: workflow.GetId().GetName(), + ReferenceLaunchPlanName: launchPlan.GetId().GetName(), + WorkflowClosure: workflow.GetClosure().GetCompiledWorkflow(), WorkflowClosureReference: storage.DataReference(workflowModel.RemoteClosureIdentifier), ExecutionParameters: executionParameters, OffloadedInputsReference: inputsURI, @@ -1160,7 +1160,7 @@ func (m *ExecutionManager) CreateExecution( *admin.ExecutionCreateResponse, error) { // Prior to flyteidl v0.15.0, Inputs was held in ExecutionSpec. Ensure older clients continue to work. 
- if request.Inputs == nil || len(request.Inputs.Literals) == 0 { + if request.GetInputs() == nil || len(request.GetInputs().GetLiterals()) == 0 { request.Inputs = request.GetSpec().GetInputs() } var executionModel *models.Execution @@ -1182,7 +1182,7 @@ func (m *ExecutionManager) CreateExecution( func (m *ExecutionManager) RelaunchExecution( ctx context.Context, request *admin.ExecutionRelaunchRequest, requestedAt time.Time) ( *admin.ExecutionCreateResponse, error) { - existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) + existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId()) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err %v", request, err) return nil, err @@ -1192,8 +1192,8 @@ func (m *ExecutionManager) RelaunchExecution( return nil, err } - executionSpec := existingExecution.Spec - if executionSpec.Metadata == nil { + executionSpec := existingExecution.GetSpec() + if executionSpec.GetMetadata() == nil { executionSpec.Metadata = &admin.ExecutionMetadata{} } var inputs *core.LiteralMap @@ -1209,17 +1209,17 @@ func (m *ExecutionManager) RelaunchExecution( if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal spec") } - inputs = spec.Inputs + inputs = spec.GetInputs() } executionSpec.Metadata.Mode = admin.ExecutionMetadata_RELAUNCH - executionSpec.Metadata.ReferenceExecution = existingExecution.Id + executionSpec.Metadata.ReferenceExecution = existingExecution.GetId() executionSpec.OverwriteCache = request.GetOverwriteCache() var executionModel *models.Execution var executionTagModel []*models.ExecutionTag ctx, executionModel, executionTagModel, err = m.launchExecutionAndPrepareModel(ctx, &admin.ExecutionCreateRequest{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Name, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetName(), Spec: executionSpec, Inputs: inputs, }, requestedAt) @@ -1231,7 +1231,7 @@ func (m *ExecutionManager) RelaunchExecution( if err != nil { return nil, err } - logger.Debugf(ctx, "Successfully relaunched [%+v] as [%+v]", request.Id, workflowExecutionIdentifier) + logger.Debugf(ctx, "Successfully relaunched [%+v] as [%+v]", request.GetId(), workflowExecutionIdentifier) return &admin.ExecutionCreateResponse{ Id: workflowExecutionIdentifier, }, nil @@ -1240,7 +1240,7 @@ func (m *ExecutionManager) RelaunchExecution( func (m *ExecutionManager) RecoverExecution( ctx context.Context, request *admin.ExecutionRecoverRequest, requestedAt time.Time) ( *admin.ExecutionCreateResponse, error) { - existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) + existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId()) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err %v", request, err) return nil, err @@ -1250,8 +1250,8 @@ func (m *ExecutionManager) RecoverExecution( return nil, err } - executionSpec := existingExecution.Spec - if executionSpec.Metadata == nil { + executionSpec := existingExecution.GetSpec() + if executionSpec.GetMetadata() == nil { executionSpec.Metadata = &admin.ExecutionMetadata{} } var inputs *core.LiteralMap @@ -1261,17 +1261,17 @@ func (m *ExecutionManager) RecoverExecution( return nil, err } } - if request.Metadata != nil { - executionSpec.Metadata.ParentNodeExecution = request.Metadata.ParentNodeExecution + if request.GetMetadata() != nil { + 
executionSpec.Metadata.ParentNodeExecution = request.GetMetadata().GetParentNodeExecution() } executionSpec.Metadata.Mode = admin.ExecutionMetadata_RECOVERED - executionSpec.Metadata.ReferenceExecution = existingExecution.Id + executionSpec.Metadata.ReferenceExecution = existingExecution.GetId() var executionModel *models.Execution var executionTagModel []*models.ExecutionTag ctx, executionModel, executionTagModel, err = m.launchExecutionAndPrepareModel(ctx, &admin.ExecutionCreateRequest{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Name, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetName(), Spec: executionSpec, Inputs: inputs, }, requestedAt) @@ -1283,7 +1283,7 @@ func (m *ExecutionManager) RecoverExecution( if err != nil { return nil, err } - logger.Infof(ctx, "Successfully recovered [%+v] as [%+v]", request.Id, workflowExecutionIdentifier) + logger.Infof(ctx, "Successfully recovered [%+v] as [%+v]", request.GetId(), workflowExecutionIdentifier) return &admin.ExecutionCreateResponse{ Id: workflowExecutionIdentifier, }, nil @@ -1304,20 +1304,20 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics( "[%s/%s/%s]", executionModel.Project, executionModel.Domain, executionModel.Name) return } - launchPlan, err := util.GetLaunchPlan(context.Background(), m.db, execution.Spec.LaunchPlan) + launchPlan, err := util.GetLaunchPlan(context.Background(), m.db, execution.GetSpec().GetLaunchPlan()) if err != nil { logger.Warningf(context.Background(), "failed to find launch plan when emitting scheduled workflow execution stats with for "+ - "execution: [%+v] and launch plan [%+v]", execution.Id, execution.Spec.LaunchPlan) + "execution: [%+v] and launch plan [%+v]", execution.GetId(), execution.GetSpec().GetLaunchPlan()) return } - if launchPlan.Spec.EntityMetadata == nil || - launchPlan.Spec.EntityMetadata.Schedule == nil || - launchPlan.Spec.EntityMetadata.Schedule.KickoffTimeInputArg == "" { + if launchPlan.GetSpec().GetEntityMetadata() == nil || + launchPlan.GetSpec().GetEntityMetadata().GetSchedule() == nil || + launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetKickoffTimeInputArg() == "" { // Kickoff time arguments aren't always required for scheduled workflows. 
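The hunk below indexes inputs.GetLiterals() directly. That stays safe even when the literal map was never populated: a getter on a nil message returns a nil map, and indexing or taking len of a nil map in Go yields the zero value rather than panicking. A small sketch with hypothetical types, not the real core.LiteralMap:

package main

import "fmt"

type Literal struct{ Value string }

type LiteralMap struct{ Literals map[string]*Literal }

// Accessor in the protoc-gen-go style: a nil receiver yields a nil map.
func (m *LiteralMap) GetLiterals() map[string]*Literal {
	if m == nil {
		return nil
	}
	return m.Literals
}

func main() {
	var inputs *LiteralMap // nil: nothing was ever unmarshalled into it

	// len of a nil map is 0, so emptiness checks need no separate nil guard.
	fmt.Println(len(inputs.GetLiterals()) == 0) // true

	// Indexing a nil map returns the element type's zero value (nil here),
	// matching the pattern in the hunk below where the kickoff-time lookup
	// result is nil-checked after the fact rather than wrapped in a guard.
	lit := inputs.GetLiterals()["kickoff_time"]
	fmt.Println(lit == nil) // true
}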
logger.Debugf(context.Background(), "no kickoff time to report for scheduled workflow execution [%+v]", - execution.Id) + execution.GetId()) return } @@ -1327,13 +1327,13 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics( logger.Errorf(ctx, "Failed to find inputs for emitting schedule delay event from uri: [%v]", executionModel.InputsURI) return } - scheduledKickoffTimeProto := inputs.Literals[launchPlan.Spec.EntityMetadata.Schedule.KickoffTimeInputArg] + scheduledKickoffTimeProto := inputs.GetLiterals()[launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetKickoffTimeInputArg()] if scheduledKickoffTimeProto == nil || scheduledKickoffTimeProto.GetScalar() == nil || scheduledKickoffTimeProto.GetScalar().GetPrimitive() == nil || scheduledKickoffTimeProto.GetScalar().GetPrimitive().GetDatetime() == nil { logger.Warningf(context.Background(), "failed to find scheduled kickoff time datetime value for scheduled workflow execution [%+v] "+ - "although one was expected", execution.Id) + "although one was expected", execution.GetId()) return } scheduledKickoffTime, err := ptypes.Timestamp(scheduledKickoffTimeProto.GetScalar().GetPrimitive().GetDatetime()) @@ -1347,16 +1347,16 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics( return } - domainCounterMap, ok := m.userMetrics.ScheduledExecutionDelays[execution.Id.Project] + domainCounterMap, ok := m.userMetrics.ScheduledExecutionDelays[execution.GetId().GetProject()] if !ok { domainCounterMap = make(map[string]*promutils.StopWatch) - m.userMetrics.ScheduledExecutionDelays[execution.Id.Project] = domainCounterMap + m.userMetrics.ScheduledExecutionDelays[execution.GetId().GetProject()] = domainCounterMap } var watch *promutils.StopWatch - watch, ok = domainCounterMap[execution.Id.Domain] + watch, ok = domainCounterMap[execution.GetId().GetDomain()] if !ok { - newWatch, err := m.systemMetrics.Scope.NewSubScope(execution.Id.Project).NewSubScope(execution.Id.Domain).NewStopWatch( + newWatch, err := m.systemMetrics.Scope.NewSubScope(execution.GetId().GetProject()).NewSubScope(execution.GetId().GetDomain()).NewStopWatch( "scheduled_execution_delay", "delay between scheduled execution time and time execution was observed running", time.Nanosecond) @@ -1367,7 +1367,7 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics( return } watch = &newWatch - domainCounterMap[execution.Id.Domain] = watch + domainCounterMap[execution.GetId().GetDomain()] = watch } watch.Observe(scheduledKickoffTime, runningEventTime) } @@ -1421,30 +1421,30 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm *admin.WorkflowExecutionEventResponse, error) { err := validation.ValidateCreateWorkflowEventRequest(request, m.config.ApplicationConfiguration().GetRemoteDataConfig().MaxSizeInBytes) if err != nil { - logger.Debugf(ctx, "received invalid CreateWorkflowEventRequest [%s]: %v", request.RequestId, err) + logger.Debugf(ctx, "received invalid CreateWorkflowEventRequest [%s]: %v", request.GetRequestId(), err) return nil, err } - ctx = getExecutionContext(ctx, request.Event.ExecutionId) + ctx = getExecutionContext(ctx, request.GetEvent().GetExecutionId()) logger.Debugf(ctx, "Received workflow execution event for [%+v] transitioning to phase [%v]", - request.Event.ExecutionId, request.Event.Phase) + request.GetEvent().GetExecutionId(), request.GetEvent().GetPhase()) - executionModel, err := util.GetExecutionModel(ctx, m.db, request.Event.ExecutionId) + executionModel, err := util.GetExecutionModel(ctx, m.db, 
request.GetEvent().GetExecutionId()) if err != nil { logger.Debugf(ctx, "failed to find execution [%+v] for recorded event [%s]: %v", - request.Event.ExecutionId, request.RequestId, err) + request.GetEvent().GetExecutionId(), request.GetRequestId(), err) return nil, err } wfExecPhase := core.WorkflowExecution_Phase(core.WorkflowExecution_Phase_value[executionModel.Phase]) // Subsequent queued events announcing a cluster reassignment are permitted. - if request.Event.Phase != core.WorkflowExecution_QUEUED { - if wfExecPhase == request.Event.Phase { + if request.GetEvent().GetPhase() != core.WorkflowExecution_QUEUED { + if wfExecPhase == request.GetEvent().GetPhase() { logger.Debugf(ctx, "This phase %s was already recorded for workflow execution %v", - wfExecPhase.String(), request.Event.ExecutionId) + wfExecPhase.String(), request.GetEvent().GetExecutionId()) return nil, errors.NewFlyteAdminErrorf(codes.AlreadyExists, "This phase %s was already recorded for workflow execution %v", - wfExecPhase.String(), request.Event.ExecutionId) - } else if err := validation.ValidateCluster(ctx, executionModel.Cluster, request.Event.ProducerId); err != nil { + wfExecPhase.String(), request.GetEvent().GetExecutionId()) + } else if err := validation.ValidateCluster(ctx, executionModel.Cluster, request.GetEvent().GetProducerId()); err != nil { // Only perform event cluster validation **after** an execution has moved on from QUEUED. return nil, err } @@ -1453,22 +1453,22 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm if common.IsExecutionTerminal(wfExecPhase) { // Cannot go backwards in time from a terminal state to anything else curPhase := wfExecPhase.String() - errorMsg := fmt.Sprintf("Invalid phase change from %s to %s for workflow execution %v", curPhase, request.Event.Phase.String(), request.Event.ExecutionId) + errorMsg := fmt.Sprintf("Invalid phase change from %s to %s for workflow execution %v", curPhase, request.GetEvent().GetPhase().String(), request.GetEvent().GetExecutionId()) return nil, errors.NewAlreadyInTerminalStateError(ctx, errorMsg, curPhase) - } else if wfExecPhase == core.WorkflowExecution_RUNNING && request.Event.Phase == core.WorkflowExecution_QUEUED { + } else if wfExecPhase == core.WorkflowExecution_RUNNING && request.GetEvent().GetPhase() == core.WorkflowExecution_QUEUED { // Cannot go back in time from RUNNING -> QUEUED return nil, errors.NewFlyteAdminErrorf(codes.FailedPrecondition, "Cannot go from %s to %s for workflow execution %v", - wfExecPhase.String(), request.Event.Phase.String(), request.Event.ExecutionId) - } else if wfExecPhase == core.WorkflowExecution_ABORTING && !common.IsExecutionTerminal(request.Event.Phase) { + wfExecPhase.String(), request.GetEvent().GetPhase().String(), request.GetEvent().GetExecutionId()) + } else if wfExecPhase == core.WorkflowExecution_ABORTING && !common.IsExecutionTerminal(request.GetEvent().GetPhase()) { return nil, errors.NewFlyteAdminErrorf(codes.FailedPrecondition, - "Invalid phase change from aborting to %s for workflow execution %v", request.Event.Phase.String(), request.Event.ExecutionId) + "Invalid phase change from aborting to %s for workflow execution %v", request.GetEvent().GetPhase().String(), request.GetEvent().GetExecutionId()) } err = transformers.UpdateExecutionModelState(ctx, executionModel, request, m.config.ApplicationConfiguration().GetRemoteDataConfig().InlineEventDataPolicy, m.storageClient) if err != nil { logger.Debugf(ctx, "failed to transform updated workflow execution model 
[%+v] after receiving event with err: %v", - request.Event.ExecutionId, err) + request.GetEvent().GetExecutionId(), err) return nil, err } err = m.db.ExecutionRepo().Update(ctx, *executionModel) @@ -1479,28 +1479,28 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm } m.dbEventWriter.Write(request) - if request.Event.Phase == core.WorkflowExecution_RUNNING { + if request.GetEvent().GetPhase() == core.WorkflowExecution_RUNNING { // Workflow executions are created in state "UNDEFINED". All the time up until a RUNNING event is received is // considered system-induced delay. if executionModel.Mode == int32(admin.ExecutionMetadata_SCHEDULED) { - go m.emitScheduledWorkflowMetrics(ctx, executionModel, request.Event.OccurredAt) + go m.emitScheduledWorkflowMetrics(ctx, executionModel, request.GetEvent().GetOccurredAt()) } - } else if common.IsExecutionTerminal(request.Event.Phase) { - if request.Event.Phase == core.WorkflowExecution_FAILED { + } else if common.IsExecutionTerminal(request.GetEvent().GetPhase()) { + if request.GetEvent().GetPhase() == core.WorkflowExecution_FAILED { // request.Event is expected to be of type WorkflowExecutionEvent_Error when workflow fails. // if not, log the error and continue - if err := request.Event.GetError(); err != nil { - ctx = context.WithValue(ctx, common.ErrorKindKey, err.Kind.String()) + if err := request.GetEvent().GetError(); err != nil { + ctx = context.WithValue(ctx, common.ErrorKindKey, err.GetKind().String()) } else { logger.Warning(ctx, "Failed to parse error for FAILED request [%+v]", request) } } m.systemMetrics.ActiveExecutions.Dec() - m.systemMetrics.ExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.Event.Phase.String())) - go m.emitOverallWorkflowExecutionTime(executionModel, request.Event.OccurredAt) - if request.Event.GetOutputData() != nil { - m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(proto.Size(request.Event.GetOutputData()))) + m.systemMetrics.ExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.GetEvent().GetPhase().String())) + go m.emitOverallWorkflowExecutionTime(executionModel, request.GetEvent().GetOccurredAt()) + if request.GetEvent().GetOutputData() != nil { + m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(proto.Size(request.GetEvent().GetOutputData()))) } err = m.publishNotifications(ctx, request, *executionModel) @@ -1515,14 +1515,14 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm if err := m.eventPublisher.Publish(ctx, proto.MessageName(request), request); err != nil { m.systemMetrics.PublishEventError.Inc() - logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.RequestId, err) + logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.GetRequestId(), err) } go func() { ceCtx := context.TODO() if err := m.cloudEventPublisher.Publish(ceCtx, proto.MessageName(request), request); err != nil { m.systemMetrics.PublishEventError.Inc() - logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.RequestId, err) + logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.GetRequestId(), err) } }() @@ -1531,12 +1531,12 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm func (m *ExecutionManager) GetExecution( ctx context.Context, request *admin.WorkflowExecutionGetRequest) (*admin.Execution, error) { - if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil { + if err := 
validation.ValidateWorkflowExecutionIdentifier(request.GetId()); err != nil { logger.Debugf(ctx, "GetExecution request [%+v] failed validation with err: %v", request, err) return nil, err } - ctx = getExecutionContext(ctx, request.Id) - executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) + ctx = getExecutionContext(ctx, request.GetId()) + executionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId()) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err) return nil, err @@ -1547,7 +1547,7 @@ func (m *ExecutionManager) GetExecution( DefaultNamespace: namespace, }) if transformerErr != nil { - logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.Id, + logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.GetId(), transformerErr) return nil, transformerErr } @@ -1557,18 +1557,18 @@ func (m *ExecutionManager) GetExecution( func (m *ExecutionManager) UpdateExecution(ctx context.Context, request *admin.ExecutionUpdateRequest, requestedAt time.Time) (*admin.ExecutionUpdateResponse, error) { - if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil { + if err := validation.ValidateWorkflowExecutionIdentifier(request.GetId()); err != nil { logger.Debugf(ctx, "UpdateExecution request [%+v] failed validation with err: %v", request, err) return nil, err } - ctx = getExecutionContext(ctx, request.Id) - executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) + ctx = getExecutionContext(ctx, request.GetId()) + executionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId()) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err) return nil, err } - if err = transformers.UpdateExecutionModelStateChangeDetails(executionModel, request.State, requestedAt, + if err = transformers.UpdateExecutionModelStateChangeDetails(executionModel, request.GetState(), requestedAt, getUser(ctx)); err != nil { return nil, err } @@ -1582,15 +1582,15 @@ func (m *ExecutionManager) UpdateExecution(ctx context.Context, request *admin.E func (m *ExecutionManager) GetExecutionData( ctx context.Context, request *admin.WorkflowExecutionGetDataRequest) (*admin.WorkflowExecutionGetDataResponse, error) { - ctx = getExecutionContext(ctx, request.Id) - executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) + ctx = getExecutionContext(ctx, request.GetId()) + executionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId()) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err) return nil, err } execution, err := transformers.FromExecutionModel(ctx, *executionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { - logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.Id, err) + logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.GetId(), err) return nil, err } // Prior to flyteidl v0.15.0, Inputs were held in ExecutionClosure and were not offloaded. Ensure we can return the inputs as expected. 
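The GetExecutionData hunks below read response.GetInputs().GetBytes() and compare response.GetOutputs().GetBytes() > 0 without guarding the intermediate messages. Scalar accessors behave like message accessors: on a nil receiver they return the field's zero value, so the comparison and the metric observation degrade to zero instead of panicking. A hypothetical sketch with stand-in types, not the admin response messages:

package main

import "fmt"

type UrlBlob struct{ Bytes int64 }

type GetDataResponse struct {
	Inputs  *UrlBlob
	Outputs *UrlBlob
}

// Accessors in the generated style: nil receivers fall back to zero values.
func (b *UrlBlob) GetBytes() int64 {
	if b == nil {
		return 0
	}
	return b.Bytes
}

func (r *GetDataResponse) GetInputs() *UrlBlob {
	if r == nil {
		return nil
	}
	return r.Inputs
}

func (r *GetDataResponse) GetOutputs() *UrlBlob {
	if r == nil {
		return nil
	}
	return r.Outputs
}

func main() {
	resp := &GetDataResponse{Inputs: &UrlBlob{Bytes: 128}} // Outputs left unset

	fmt.Println(float64(resp.GetInputs().GetBytes())) // 128

	// Outputs is nil, but the chained getters keep the comparison safe:
	fmt.Println(resp.GetOutputs().GetBytes() > 0) // false
}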
@@ -1600,7 +1600,7 @@ func (m *ExecutionManager) GetExecutionData( if err := proto.Unmarshal(executionModel.Closure, closure); err != nil { return nil, err } - newInputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, closure.ComputedInputs, request.Id.Project, request.Id.Domain, request.Id.Name, shared.Inputs) + newInputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, closure.GetComputedInputs(), request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName(), shared.Inputs) if err != nil { return nil, err } @@ -1626,7 +1626,7 @@ func (m *ExecutionManager) GetExecutionData( group.Go(func() error { var err error outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, util.ToExecutionClosureInterface(execution.Closure)) + m.storageClient, util.ToExecutionClosureInterface(execution.GetClosure())) return err }) @@ -1642,11 +1642,11 @@ func (m *ExecutionManager) GetExecutionData( FullOutputs: outputs, } - m.userMetrics.WorkflowExecutionInputBytes.Observe(float64(response.Inputs.Bytes)) - if response.Outputs.Bytes > 0 { - m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(response.Outputs.Bytes)) - } else if response.FullOutputs != nil { - m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(proto.Size(response.FullOutputs))) + m.userMetrics.WorkflowExecutionInputBytes.Observe(float64(response.GetInputs().GetBytes())) + if response.GetOutputs().GetBytes() > 0 { + m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(response.GetOutputs().GetBytes())) + } else if response.GetFullOutputs() != nil { + m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(proto.Size(response.GetFullOutputs()))) } return response, nil } @@ -1658,26 +1658,26 @@ func (m *ExecutionManager) ListExecutions( logger.Debugf(ctx, "ListExecutions request [%+v] failed validation with err: %v", request, err) return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain()) filters, err := util.GetDbFilters(util.FilterSpec{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, // Optional, may be empty. - RequestFilters: request.Filters, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), // Optional, may be empty. 
+ RequestFilters: request.GetFilters(), }, common.Execution) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.ExecutionColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.ExecutionColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s for ListExecutions", - request.Token) + request.GetToken()) } joinTableEntities := make(map[common.Entity]bool) for _, filter := range filters { @@ -1690,7 +1690,7 @@ func (m *ExecutionManager) ListExecutions( } listExecutionsInput := repositoryInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, @@ -1717,7 +1717,7 @@ func (m *ExecutionManager) ListExecutions( // END TO BE DELETED var token string - if len(executionList) == int(request.Limit) { + if len(executionList) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(executionList)) } return &admin.ExecutionList{ @@ -1736,16 +1736,16 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request *ad if err != nil { // This shouldn't happen because execution manager marshaled the data into models.Execution. m.systemMetrics.TransformerError.Inc() - return errors.NewFlyteAdminErrorf(codes.Internal, "Failed to transform execution [%+v] with err: %v", request.Event.ExecutionId, err) + return errors.NewFlyteAdminErrorf(codes.Internal, "Failed to transform execution [%+v] with err: %v", request.GetEvent().GetExecutionId(), err) } - var notificationsList = adminExecution.Closure.Notifications + var notificationsList = adminExecution.GetClosure().GetNotifications() logger.Debugf(ctx, "publishing notifications for execution [%+v] in state [%+v] for notifications [%+v]", - request.Event.ExecutionId, request.Event.Phase, notificationsList) + request.GetEvent().GetExecutionId(), request.GetEvent().GetPhase(), notificationsList) for _, notification := range notificationsList { // Check if the notification phase matches the current one. var matchPhase = false - for _, phase := range notification.Phases { - if phase == request.Event.Phase { + for _, phase := range notification.GetPhases() { + if phase == request.GetEvent().GetPhase() { matchPhase = true } } @@ -1765,11 +1765,11 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request *ad } else if notification.GetSlack() != nil { emailNotification.RecipientsEmail = notification.GetSlack().GetRecipientsEmail() } else { - logger.Debugf(ctx, "failed to publish notification, encountered unrecognized type: %v", notification.Type) + logger.Debugf(ctx, "failed to publish notification, encountered unrecognized type: %v", notification.GetType()) m.systemMetrics.UnexpectedDataError.Inc() // Unsupported notification types should have been caught when the launch plan was being created. return errors.NewFlyteAdminErrorf(codes.Internal, "Unsupported notification type [%v] for execution [%+v]", - notification.Type, request.Event.ExecutionId) + notification.GetType(), request.GetEvent().GetExecutionId()) } // Convert the email Notification into an email message to be published. 
@@ -1789,19 +1789,19 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request *ad func (m *ExecutionManager) TerminateExecution( ctx context.Context, request *admin.ExecutionTerminateRequest) (*admin.ExecutionTerminateResponse, error) { - if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil { + if err := validation.ValidateWorkflowExecutionIdentifier(request.GetId()); err != nil { logger.Debugf(ctx, "received terminate execution request: %v with invalid identifier: %v", request, err) return nil, err } - ctx = getExecutionContext(ctx, request.Id) + ctx = getExecutionContext(ctx, request.GetId()) // Save the abort reason (best effort) executionModel, err := m.db.ExecutionRepo().Get(ctx, repositoryInterfaces.Identifier{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), }) if err != nil { - logger.Infof(ctx, "couldn't find execution [%+v] to save termination cause", request.Id) + logger.Infof(ctx, "couldn't find execution [%+v] to save termination cause", request.GetId()) return nil, err } @@ -1809,24 +1809,24 @@ func (m *ExecutionManager) TerminateExecution( return nil, errors.NewAlreadyInTerminalStateError(ctx, "Cannot abort an already terminated workflow execution", executionModel.Phase) } - err = transformers.SetExecutionAborting(&executionModel, request.Cause, getUser(ctx)) + err = transformers.SetExecutionAborting(&executionModel, request.GetCause(), getUser(ctx)) if err != nil { - logger.Debugf(ctx, "failed to add abort metadata for execution [%+v] with err: %v", request.Id, err) + logger.Debugf(ctx, "failed to add abort metadata for execution [%+v] with err: %v", request.GetId(), err) return nil, err } err = m.db.ExecutionRepo().Update(ctx, executionModel) if err != nil { - logger.Debugf(ctx, "failed to save abort cause for terminated execution: %+v with err: %v", request.Id, err) + logger.Debugf(ctx, "failed to save abort cause for terminated execution: %+v with err: %v", request.GetId(), err) return nil, err } workflowExecutor := plugins.Get[workflowengineInterfaces.WorkflowExecutor](m.pluginRegistry, plugins.PluginIDWorkflowExecutor) err = workflowExecutor.Abort(ctx, workflowengineInterfaces.AbortData{ Namespace: common.GetNamespaceName( - m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), request.Id.Project, request.Id.Domain), + m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), request.GetId().GetProject(), request.GetId().GetDomain()), - ExecutionID: request.Id, + ExecutionID: request.GetId(), Cluster: executionModel.Cluster, }) if err != nil { @@ -1916,7 +1916,7 @@ func (m *ExecutionManager) addProjectLabels(ctx context.Context, projectName str return nil, err } // passing nil domain as not needed to retrieve labels - projectLabels := transformers.FromProjectModel(project, nil).Labels.GetValues() + projectLabels := transformers.FromProjectModel(project, nil).GetLabels().GetValues() if initialLabels == nil { initialLabels = make(map[string]string) diff --git a/flyteadmin/pkg/manager/impl/execution_manager_test.go b/flyteadmin/pkg/manager/impl/execution_manager_test.go index 5e874a4589..79068d25ff 100644 --- a/flyteadmin/pkg/manager/impl/execution_manager_test.go +++ b/flyteadmin/pkg/manager/impl/execution_manager_test.go @@ -60,7 +60,7 @@ const ( executionClusterLabel = "execution_cluster_label" ) -var spec = testutils.GetExecutionRequest().Spec +var 
spec = testutils.GetExecutionRequest().GetSpec() var specBytes, _ = proto.Marshal(spec) var phase = core.WorkflowExecution_RUNNING.String() var closure = admin.ExecutionClosure{ @@ -94,8 +94,8 @@ var resourceLimits = runtimeInterfaces.TaskResourceSet{ func getLegacySpec() *admin.ExecutionSpec { executionRequest := testutils.GetExecutionRequest() - legacySpec := executionRequest.Spec - legacySpec.Inputs = executionRequest.Inputs + legacySpec := executionRequest.GetSpec() + legacySpec.Inputs = executionRequest.GetInputs() return legacySpec } @@ -121,7 +121,7 @@ func getExpectedLegacySpecBytes() []byte { } func getExpectedSpec() *admin.ExecutionSpec { - expectedSpec := testutils.GetExecutionRequest().Spec + expectedSpec := testutils.GetExecutionRequest().GetSpec() expectedSpec.Metadata = &admin.ExecutionMetadata{ SystemMetadata: &admin.SystemMetadata{ Namespace: "project-domain", @@ -138,7 +138,7 @@ func getExpectedSpecBytes() []byte { func getLegacyClosure() *admin.ExecutionClosure { return &admin.ExecutionClosure{ Phase: core.WorkflowExecution_RUNNING, - ComputedInputs: getLegacySpec().Inputs, + ComputedInputs: getLegacySpec().GetInputs(), StateChangeDetails: &admin.ExecutionStateChangeDetails{ State: admin.ExecutionState_EXECUTION_ACTIVE, OccurredAt: testutils.MockCreatedAtProto, @@ -153,7 +153,7 @@ func getLegacyClosureBytes() []byte { func getLegacyExecutionRequest() *admin.ExecutionCreateRequest { r := testutils.GetExecutionRequest() - r.Spec.Inputs = r.Inputs + r.Spec.Inputs = r.GetInputs() r.Inputs = nil return r } @@ -193,7 +193,7 @@ func setDefaultLpCallbackForExecTest(repository interfaces.Repository) { lpSpecBytes, _ := proto.Marshal(lpSpec) lpClosure := admin.LaunchPlanClosure{ - ExpectedInputs: lpSpec.DefaultInputs, + ExpectedInputs: lpSpec.GetDefaultInputs(), } lpClosureBytes, _ := proto.Marshal(&lpClosure) @@ -313,11 +313,11 @@ func TestCreateExecution(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.NoError(t, err) - assert.Equal(t, principal, spec.Metadata.Principal) - assert.Equal(t, rawOutput, spec.RawOutputDataConfig.OutputLocationPrefix) - assert.True(t, proto.Equal(spec.ClusterAssignment, &clusterAssignment)) + assert.Equal(t, principal, spec.GetMetadata().GetPrincipal()) + assert.Equal(t, rawOutput, spec.GetRawOutputDataConfig().GetOutputLocationPrefix()) + assert.True(t, proto.Equal(spec.GetClusterAssignment(), &clusterAssignment)) assert.Equal(t, "launch_plan", input.LaunchEntity) - assert.Equal(t, spec.GetMetadata().GetSystemMetadata().Namespace, "project-domain") + assert.Equal(t, spec.GetMetadata().GetSystemMetadata().GetNamespace(), "project-domain") return nil }) setDefaultLpCallbackForExecTest(repository) @@ -347,10 +347,10 @@ func TestCreateExecution(t *testing.T) { mockExecutor.OnExecuteMatch(mock.Anything, mock.MatchedBy(func(data workflowengineInterfaces.ExecutionData) bool { tasks := data.WorkflowClosure.GetTasks() for _, task := range tasks { - assert.Equal(t, len(resources.Requests), len(task.Template.GetContainer().Resources.Requests)) - for i, request := range resources.Requests { - assert.True(t, proto.Equal(request, task.Template.GetContainer().Resources.Requests[i])) - assert.True(t, proto.Equal(request, task.Template.GetContainer().Resources.Limits[i])) + assert.Equal(t, len(resources.GetRequests()), len(task.GetTemplate().GetContainer().GetResources().GetRequests())) + for i, request := range resources.GetRequests() { + assert.True(t, proto.Equal(request, 
task.GetTemplate().GetContainer().GetResources().GetRequests()[i])) + assert.True(t, proto.Equal(request, task.GetTemplate().GetContainer().GetResources().GetLimits()[i])) } } @@ -401,7 +401,7 @@ func TestCreateExecution(t *testing.T) { Id: &executionIdentifier, } assert.NoError(t, err) - assert.True(t, proto.Equal(expectedResponse.Id, response.Id)) + assert.True(t, proto.Equal(expectedResponse.GetId(), response.GetId())) // TODO: Check for offloaded inputs } @@ -436,9 +436,9 @@ func TestCreateExecutionFromWorkflowNode(t *testing.T) { var clusterLabel = &admin.ExecutionClusterLabel{Value: executionClusterLabel} repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback( func(ctx context.Context, input interfaces.Identifier) (models.Execution, error) { - assert.EqualValues(t, input.Project, parentNodeExecutionID.ExecutionId.Project) - assert.EqualValues(t, input.Domain, parentNodeExecutionID.ExecutionId.Domain) - assert.EqualValues(t, input.Name, parentNodeExecutionID.ExecutionId.Name) + assert.EqualValues(t, input.Project, parentNodeExecutionID.GetExecutionId().GetProject()) + assert.EqualValues(t, input.Domain, parentNodeExecutionID.GetExecutionId().GetDomain()) + assert.EqualValues(t, input.Name, parentNodeExecutionID.GetExecutionId().GetName()) spec := &admin.ExecutionSpec{ Metadata: &admin.ExecutionMetadata{ Nesting: 1, @@ -463,13 +463,13 @@ func TestCreateExecutionFromWorkflowNode(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.NoError(t, err) - assert.Equal(t, admin.ExecutionMetadata_CHILD_WORKFLOW, spec.Metadata.Mode) - assert.True(t, proto.Equal(parentNodeExecutionID, spec.Metadata.ParentNodeExecution)) + assert.Equal(t, admin.ExecutionMetadata_CHILD_WORKFLOW, spec.GetMetadata().GetMode()) + assert.True(t, proto.Equal(parentNodeExecutionID, spec.GetMetadata().GetParentNodeExecution())) assert.EqualValues(t, input.ParentNodeExecutionID, 1) assert.EqualValues(t, input.SourceExecutionID, 2) - assert.Equal(t, 2, int(spec.Metadata.Nesting)) - assert.Equal(t, principal, spec.Metadata.Principal) - assert.Equal(t, executionClusterLabel, spec.ExecutionClusterLabel.Value) + assert.Equal(t, 2, int(spec.GetMetadata().GetNesting())) + assert.Equal(t, principal, spec.GetMetadata().GetPrincipal()) + assert.Equal(t, executionClusterLabel, spec.GetExecutionClusterLabel().GetValue()) assert.Equal(t, principal, input.User) return nil }, @@ -505,14 +505,14 @@ func TestCreateExecution_NoAssignedName(t *testing.T) { setDefaultLpCallbackForExecTest(repository) repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback( func(ctx context.Context, input models.Execution) error { - assert.Equal(t, executionIdentifier.Project, input.Project) - assert.Equal(t, executionIdentifier.Domain, input.Domain) + assert.Equal(t, executionIdentifier.GetProject(), input.Project) + assert.Equal(t, executionIdentifier.GetDomain(), input.Domain) assert.NotEmpty(t, input.Name) return nil }) mockExecutor := workflowengineMocks.WorkflowExecutor{} mockExecutor.OnExecuteMatch(mock.Anything, mock.MatchedBy(func(data workflowengineInterfaces.ExecutionData) bool { - return len(data.ExecutionID.Name) > 0 + return len(data.ExecutionID.GetName()) > 0 })).Return(workflowengineInterfaces.ExecutionResponse{ Cluster: testCluster, }, nil) @@ -530,9 +530,9 @@ func TestCreateExecution_NoAssignedName(t *testing.T) { Id: &executionIdentifier, } assert.Nil(t, err) - assert.Equal(t, expectedResponse.Id.Project, response.Id.Project) - assert.Equal(t, 
expectedResponse.Id.Domain, response.Id.Domain) - assert.NotEmpty(t, response.Id.Name) + assert.Equal(t, expectedResponse.GetId().GetProject(), response.GetId().GetProject()) + assert.Equal(t, expectedResponse.GetId().GetDomain(), response.GetId().GetDomain()) + assert.NotEmpty(t, response.GetId().GetName()) } func TestCreateExecution_TaggedQueue(t *testing.T) { @@ -558,11 +558,11 @@ func TestCreateExecution_TaggedQueue(t *testing.T) { mockExecutor := workflowengineMocks.WorkflowExecutor{} mockExecutor.OnExecuteMatch(mock.Anything, mock.MatchedBy(func(data workflowengineInterfaces.ExecutionData) bool { - assert.NotEmpty(t, data.WorkflowClosure.Tasks) - for _, task := range data.WorkflowClosure.Tasks { - assert.Len(t, task.Template.GetContainer().Config, 1) - assert.Contains(t, childContainerQueueKey, task.Template.GetContainer().Config[0].Key) - assert.Contains(t, "dynamic Q", task.Template.GetContainer().Config[0].Value) + assert.NotEmpty(t, data.WorkflowClosure.GetTasks()) + for _, task := range data.WorkflowClosure.GetTasks() { + assert.Len(t, task.GetTemplate().GetContainer().GetConfig(), 1) + assert.Contains(t, childContainerQueueKey, task.GetTemplate().GetContainer().GetConfig()[0].GetKey()) + assert.Contains(t, "dynamic Q", task.GetTemplate().GetContainer().GetConfig()[0].GetValue()) } return true })).Return(workflowengineInterfaces.ExecutionResponse{ @@ -720,14 +720,14 @@ func TestCreateExecutionVerifyDbModel(t *testing.T) { if err != nil { return err } - assert.Nil(t, specValue.Inputs) + assert.Nil(t, specValue.GetInputs()) var closureValue admin.ExecutionClosure err = proto.Unmarshal(input.Closure, &closureValue) if err != nil { return err } - assert.Nil(t, closureValue.ComputedInputs) + assert.Nil(t, closureValue.GetComputedInputs()) var userInputs, inputs core.LiteralMap if err := storageClient.ReadProtobuf(ctx, input.UserInputsURI, &userInputs); err != nil { @@ -737,19 +737,19 @@ func TestCreateExecutionVerifyDbModel(t *testing.T) { return err } fooValue := coreutils.MustMakeLiteral("foo-value-1") - assert.Equal(t, 1, len(userInputs.Literals)) - assert.EqualValues(t, userInputs.Literals["foo"], fooValue) + assert.Equal(t, 1, len(userInputs.GetLiterals())) + assert.EqualValues(t, userInputs.GetLiterals()["foo"], fooValue) barValue := coreutils.MustMakeLiteral("bar-value") - assert.Equal(t, len(inputs.Literals), 2) - assert.EqualValues(t, inputs.Literals["foo"], fooValue) - assert.EqualValues(t, inputs.Literals["bar"], barValue) - assert.Equal(t, core.WorkflowExecution_UNDEFINED, closureValue.Phase) + assert.Equal(t, len(inputs.GetLiterals()), 2) + assert.EqualValues(t, inputs.GetLiterals()["foo"], fooValue) + assert.EqualValues(t, inputs.GetLiterals()["bar"], barValue) + assert.Equal(t, core.WorkflowExecution_UNDEFINED, closureValue.GetPhase()) assert.Equal(t, createdAt, *input.ExecutionCreatedAt) - assert.Equal(t, 1, len(closureValue.Notifications)) - assert.Equal(t, 1, len(closureValue.Notifications[0].Phases)) - assert.Equal(t, request.Spec.GetNotifications().Notifications[0].Phases[0], closureValue.Notifications[0].Phases[0]) - assert.IsType(t, &admin.Notification_Slack{}, closureValue.Notifications[0].GetType()) - assert.Equal(t, request.Spec.GetNotifications().Notifications[0].GetSlack().RecipientsEmail, closureValue.Notifications[0].GetSlack().RecipientsEmail) + assert.Equal(t, 1, len(closureValue.GetNotifications())) + assert.Equal(t, 1, len(closureValue.GetNotifications()[0].GetPhases())) + assert.Equal(t, 
request.GetSpec().GetNotifications().GetNotifications()[0].GetPhases()[0], closureValue.GetNotifications()[0].GetPhases()[0]) + assert.IsType(t, &admin.Notification_Slack{}, closureValue.GetNotifications()[0].GetType()) + assert.Equal(t, request.GetSpec().GetNotifications().GetNotifications()[0].GetSlack().GetRecipientsEmail(), closureValue.GetNotifications()[0].GetSlack().GetRecipientsEmail()) return nil } @@ -766,7 +766,7 @@ func TestCreateExecutionVerifyDbModel(t *testing.T) { response, err := execManager.CreateExecution(context.Background(), request, requestedAt) assert.Nil(t, err) - assert.True(t, proto.Equal(&executionIdentifier, response.Id)) + assert.True(t, proto.Equal(&executionIdentifier, response.GetId())) } func TestCreateExecutionDefaultNotifications(t *testing.T) { @@ -790,10 +790,10 @@ func TestCreateExecutionDefaultNotifications(t *testing.T) { return err } - assert.Equal(t, 1, len(closureValue.Notifications)) - assert.Equal(t, 1, len(closureValue.Notifications[0].Phases)) - assert.Equal(t, core.WorkflowExecution_SUCCEEDED, closureValue.Notifications[0].Phases[0]) - assert.IsType(t, &admin.Notification_Email{}, closureValue.Notifications[0].GetType()) + assert.Equal(t, 1, len(closureValue.GetNotifications())) + assert.Equal(t, 1, len(closureValue.GetNotifications()[0].GetPhases())) + assert.Equal(t, core.WorkflowExecution_SUCCEEDED, closureValue.GetNotifications()[0].GetPhases()[0]) + assert.IsType(t, &admin.Notification_Email{}, closureValue.GetNotifications()[0].GetType()) return nil } @@ -811,7 +811,7 @@ func TestCreateExecutionDefaultNotifications(t *testing.T) { Project: "project", Domain: "domain", Name: "name", - }, response.Id)) + }, response.GetId())) } func TestCreateExecutionDisableNotifications(t *testing.T) { @@ -833,7 +833,7 @@ func TestCreateExecutionDisableNotifications(t *testing.T) { return err } - assert.Empty(t, closureValue.Notifications) + assert.Empty(t, closureValue.GetNotifications()) return nil } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) @@ -850,7 +850,7 @@ func TestCreateExecutionDisableNotifications(t *testing.T) { Project: "project", Domain: "domain", Name: "name", - }, response.Id)) + }, response.GetId())) } func TestCreateExecutionNoNotifications(t *testing.T) { @@ -868,7 +868,7 @@ func TestCreateExecutionNoNotifications(t *testing.T) { lpSpec.EntityMetadata.Notifications = nil lpSpecBytes, _ := proto.Marshal(lpSpec) lpClosure := admin.LaunchPlanClosure{ - ExpectedInputs: lpSpec.DefaultInputs, + ExpectedInputs: lpSpec.GetDefaultInputs(), } lpClosureBytes, _ := proto.Marshal(&lpClosure) @@ -920,7 +920,7 @@ func TestCreateExecutionNoNotifications(t *testing.T) { Project: "project", Domain: "domain", Name: "name", - }, response.Id)) + }, response.GetId())) } func TestCreateExecutionDynamicLabelsAndAnnotations(t *testing.T) { @@ -1209,8 +1209,8 @@ func TestCreateExecutionWithEnvs(t *testing.T) { assert.Equal(t, uint(0), input.TaskID) } if len(tt.envs) != 0 { - assert.Equal(t, tt.envs[0].Key, spec.GetEnvs().Values[0].Key) - assert.Equal(t, tt.envs[0].Value, spec.GetEnvs().Values[0].Value) + assert.Equal(t, tt.envs[0].GetKey(), spec.GetEnvs().GetValues()[0].GetKey()) + assert.Equal(t, tt.envs[0].GetValue(), spec.GetEnvs().GetValues()[0].GetValue()) } else { assert.Nil(t, spec.GetEnvs().GetValues()) } @@ -1244,7 +1244,7 @@ func TestCreateExecution_CustomNamespaceMappingConfig(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.NoError(t, err) - 
assert.Equal(t, spec.GetMetadata().GetSystemMetadata().Namespace, "project") + assert.Equal(t, spec.GetMetadata().GetSystemMetadata().GetNamespace(), "project") return nil } @@ -1272,7 +1272,7 @@ func TestCreateExecution_CustomNamespaceMappingConfig(t *testing.T) { response, err := execManager.CreateExecution(context.Background(), request, requestedAt) assert.Nil(t, err) - assert.True(t, proto.Equal(&executionIdentifier, response.Id)) + assert.True(t, proto.Equal(&executionIdentifier, response.GetId())) } func makeExecutionGetFunc( @@ -1341,7 +1341,7 @@ func makeExecutionInterruptibleGetFunc( request.Spec.Interruptible = &wrappers.BoolValue{Value: *interruptible} } - specBytes, err := proto.Marshal(request.Spec) + specBytes, err := proto.Marshal(request.GetSpec()) assert.Nil(t, err) return models.Execution{ @@ -1374,7 +1374,7 @@ func makeExecutionOverwriteCacheGetFunc( request := testutils.GetExecutionRequest() request.Spec.OverwriteCache = overwriteCache - specBytes, err := proto.Marshal(request.Spec) + specBytes, err := proto.Marshal(request.GetSpec()) assert.Nil(t, err) return models.Execution{ @@ -1407,7 +1407,7 @@ func makeExecutionWithEnvs( request := testutils.GetExecutionRequest() request.Spec.Envs.Values = envs - specBytes, err := proto.Marshal(request.Spec) + specBytes, err := proto.Marshal(request.GetSpec()) assert.Nil(t, err) return models.Execution{ @@ -1460,7 +1460,7 @@ func TestRelaunchExecution(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) return nil } @@ -1600,7 +1600,7 @@ func TestRelaunchExecutionInterruptibleOverride(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) assert.NotNil(t, spec.GetInterruptible()) assert.True(t, spec.GetInterruptible().GetValue()) @@ -1652,7 +1652,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) assert.True(t, spec.GetOverwriteCache()) return nil @@ -1687,7 +1687,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) assert.False(t, spec.GetOverwriteCache()) return nil @@ -1722,7 +1722,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode()) assert.Equal(t, 
int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) assert.False(t, spec.GetOverwriteCache()) return nil @@ -1774,11 +1774,11 @@ func TestRelaunchExecutionEnvsOverride(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) assert.NotNil(t, spec.GetEnvs()) - assert.Equal(t, spec.GetEnvs().Values[0].Key, env[0].Key) - assert.Equal(t, spec.GetEnvs().Values[0].Value, env[0].Value) + assert.Equal(t, spec.GetEnvs().GetValues()[0].GetKey(), env[0].GetKey()) + assert.Equal(t, spec.GetEnvs().GetValues()[0].GetValue(), env[0].GetValue()) return nil } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) @@ -1825,7 +1825,7 @@ func TestRecoverExecution(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode) return nil } @@ -1904,7 +1904,7 @@ func TestRecoverExecution_RecoveredChildNode(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode) assert.Equal(t, parentNodeDatabaseID, input.ParentNodeExecutionID) assert.Equal(t, referencedExecutionID, input.SourceExecutionID) @@ -2067,7 +2067,7 @@ func TestRecoverExecutionInterruptibleOverride(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode) assert.NotNil(t, spec.GetInterruptible()) assert.True(t, spec.GetInterruptible().GetValue()) @@ -2129,7 +2129,7 @@ func TestRecoverExecutionOverwriteCacheOverride(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode) assert.True(t, spec.GetOverwriteCache()) return nil @@ -2189,11 +2189,11 @@ func TestRecoverExecutionEnvsOverride(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode) + assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode) assert.NotNil(t, spec.GetEnvs()) - assert.Equal(t, spec.GetEnvs().GetValues()[0].Key, env[0].Key) - assert.Equal(t, spec.GetEnvs().GetValues()[0].Value, env[0].Value) + assert.Equal(t, spec.GetEnvs().GetValues()[0].GetKey(), env[0].GetKey()) + assert.Equal(t, spec.GetEnvs().GetValues()[0].GetValue(), env[0].GetValue()) return nil } 
repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) @@ -2843,9 +2843,9 @@ func TestGetExecution(t *testing.T) { Id: &executionIdentifier, }) assert.NoError(t, err) - assert.True(t, proto.Equal(&executionIdentifier, execution.Id)) - assert.True(t, proto.Equal(getExpectedSpec(), execution.Spec)) - assert.True(t, proto.Equal(&closure, execution.Closure)) + assert.True(t, proto.Equal(&executionIdentifier, execution.GetId())) + assert.True(t, proto.Equal(getExpectedSpec(), execution.GetSpec())) + assert.True(t, proto.Equal(&closure, execution.GetClosure())) } func TestGetExecution_DatabaseError(t *testing.T) { @@ -3070,18 +3070,18 @@ func TestListExecutions(t *testing.T) { }) assert.NoError(t, err) assert.NotNil(t, executionList) - assert.Len(t, executionList.Executions, 2) + assert.Len(t, executionList.GetExecutions(), 2) - for idx, execution := range executionList.Executions { - assert.Equal(t, projectValue, execution.Id.Project) - assert.Equal(t, domainValue, execution.Id.Domain) + for idx, execution := range executionList.GetExecutions() { + assert.Equal(t, projectValue, execution.GetId().GetProject()) + assert.Equal(t, domainValue, execution.GetId().GetDomain()) if idx == 0 { - assert.Equal(t, "my awesome execution", execution.Id.Name) + assert.Equal(t, "my awesome execution", execution.GetId().GetName()) } - assert.True(t, proto.Equal(getExpectedSpec(), execution.Spec)) - assert.True(t, proto.Equal(&closure, execution.Closure)) + assert.True(t, proto.Equal(getExpectedSpec(), execution.GetSpec())) + assert.True(t, proto.Equal(&closure, execution.GetClosure())) } - assert.Empty(t, executionList.Token) + assert.Empty(t, executionList.GetToken()) } func TestListExecutions_MissingParameters(t *testing.T) { @@ -3212,7 +3212,7 @@ func TestExecutionManager_PublishNotifications(t *testing.T) { }, } var execClosure = &admin.ExecutionClosure{ - Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications, + Notifications: testutils.GetExecutionRequest().GetSpec().GetNotifications().GetNotifications(), WorkflowId: &core.Identifier{ ResourceType: core.ResourceType_WORKFLOW, Project: "wf_project", @@ -3248,8 +3248,8 @@ func TestExecutionManager_PublishNotifications(t *testing.T) { }, }, } - execClosure.Notifications = append(execClosure.Notifications, extraNotifications[0]) - execClosure.Notifications = append(execClosure.Notifications, extraNotifications[1]) + execClosure.Notifications = append(execClosure.GetNotifications(), extraNotifications[0]) + execClosure.Notifications = append(execClosure.GetNotifications(), extraNotifications[1]) execClosureBytes, _ := proto.Marshal(execClosure) executionModel := models.Execution{ @@ -3351,7 +3351,7 @@ func TestExecutionManager_TestExecutionManager_PublishNotificationsTransformErro }, } var execClosure = &admin.ExecutionClosure{ - Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications, + Notifications: testutils.GetExecutionRequest().GetSpec().GetNotifications().GetNotifications(), WorkflowId: &core.Identifier{ ResourceType: core.ResourceType_WORKFLOW, Project: "wf_project", @@ -3402,7 +3402,7 @@ func TestExecutionManager_PublishNotificationsNoPhaseMatch(t *testing.T) { }, } var execClosure = &admin.ExecutionClosure{ - Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications, + Notifications: testutils.GetExecutionRequest().GetSpec().GetNotifications().GetNotifications(), } execClosureBytes, _ := proto.Marshal(execClosure) 
executionModel := models.Execution{ @@ -3723,12 +3723,12 @@ func TestAddPluginOverrides(t *testing.T) { assert.NoError(t, err) assert.Len(t, taskPluginOverrides, 2) for _, override := range taskPluginOverrides { - if override.TaskType == "python" { - assert.EqualValues(t, []string{"plugin a"}, override.PluginId) - } else if override.TaskType == "hive" { - assert.EqualValues(t, []string{"plugin b"}, override.PluginId) + if override.GetTaskType() == "python" { + assert.EqualValues(t, []string{"plugin a"}, override.GetPluginId()) + } else if override.GetTaskType() == "hive" { + assert.EqualValues(t, []string{"plugin b"}, override.GetPluginId()) } else { - t.Errorf("Unexpected task type [%s] plugin override committed to db", override.TaskType) + t.Errorf("Unexpected task type [%s] plugin override committed to db", override.GetTaskType()) } } } @@ -3788,9 +3788,9 @@ func TestGetExecution_Legacy(t *testing.T) { Id: &executionIdentifier, }) assert.NoError(t, err) - assert.True(t, proto.Equal(&executionIdentifier, execution.Id)) - assert.True(t, proto.Equal(getExpectedLegacySpec(), execution.Spec)) - assert.True(t, proto.Equal(getLegacyClosure(), execution.Closure)) + assert.True(t, proto.Equal(&executionIdentifier, execution.GetId())) + assert.True(t, proto.Equal(getExpectedLegacySpec(), execution.GetSpec())) + assert.True(t, proto.Equal(getLegacyClosure(), execution.GetClosure())) } func TestGetExecutionData_LegacyModel(t *testing.T) { @@ -3870,7 +3870,7 @@ func TestGetExecutionData_LegacyModel(t *testing.T) { var inputs core.LiteralMap err = storageClient.ReadProtobuf(context.Background(), storage.DataReference("s3://bucket/metadata/project/domain/name/inputs"), &inputs) assert.Nil(t, err) - assert.True(t, proto.Equal(&inputs, closure.ComputedInputs)) + assert.True(t, proto.Equal(&inputs, closure.GetComputedInputs())) } func TestCreateExecution_LegacyClient(t *testing.T) { @@ -3937,10 +3937,10 @@ func TestRelaunchExecution_LegacyModel(t *testing.T) { var spec admin.ExecutionSpec err := proto.Unmarshal(input.Spec, &spec) assert.Nil(t, err) - assert.Equal(t, "default_raw_output", spec.RawOutputDataConfig.OutputLocationPrefix) - assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, "default_raw_output", spec.GetRawOutputDataConfig().GetOutputLocationPrefix()) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode()) assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) - assert.True(t, proto.Equal(spec.Inputs, getLegacySpec().Inputs)) + assert.True(t, proto.Equal(spec.GetInputs(), getLegacySpec().GetInputs())) return nil } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) @@ -3971,12 +3971,12 @@ func TestRelaunchExecution_LegacyModel(t *testing.T) { var userInputs core.LiteralMap err = storageClient.ReadProtobuf(context.Background(), "s3://bucket/metadata/project/domain/relaunchy/user_inputs", &userInputs) assert.Nil(t, err) - assert.True(t, proto.Equal(&userInputs, getLegacySpec().Inputs)) + assert.True(t, proto.Equal(&userInputs, getLegacySpec().GetInputs())) var inputs core.LiteralMap err = storageClient.ReadProtobuf(context.Background(), "s3://bucket/metadata/project/domain/relaunchy/inputs", &inputs) assert.Nil(t, err) - assert.True(t, proto.Equal(&inputs, existingClosure.ComputedInputs)) + assert.True(t, proto.Equal(&inputs, existingClosure.GetComputedInputs())) } func TestListExecutions_LegacyModel(t *testing.T) { @@ -4052,18 +4052,18 @@ func 
TestListExecutions_LegacyModel(t *testing.T) { }) assert.NoError(t, err) assert.NotNil(t, executionList) - assert.Len(t, executionList.Executions, 2) + assert.Len(t, executionList.GetExecutions(), 2) - for idx, execution := range executionList.Executions { - assert.Equal(t, projectValue, execution.Id.Project) - assert.Equal(t, domainValue, execution.Id.Domain) + for idx, execution := range executionList.GetExecutions() { + assert.Equal(t, projectValue, execution.GetId().GetProject()) + assert.Equal(t, domainValue, execution.GetId().GetDomain()) if idx == 0 { - assert.Equal(t, "my awesome execution", execution.Id.Name) + assert.Equal(t, "my awesome execution", execution.GetId().GetName()) } - assert.True(t, proto.Equal(spec, execution.Spec)) - assert.True(t, proto.Equal(&closure, execution.Closure)) + assert.True(t, proto.Equal(spec, execution.GetSpec())) + assert.True(t, proto.Equal(&closure, execution.GetClosure())) } - assert.Empty(t, executionList.Token) + assert.Empty(t, executionList.GetToken()) } func TestSetDefaults(t *testing.T) { @@ -4148,7 +4148,7 @@ func TestSetDefaults(t *testing.T) { }, }, }, - task.Template.GetContainer()), fmt.Sprintf("%+v", task.Template.GetContainer())) + task.GetTemplate().GetContainer()), fmt.Sprintf("%+v", task.GetTemplate().GetContainer())) } func TestSetDefaults_MissingRequests_ExistingRequestsPreserved(t *testing.T) { @@ -4224,7 +4224,7 @@ func TestSetDefaults_MissingRequests_ExistingRequestsPreserved(t *testing.T) { }, }, }, - task.Template.GetContainer()), fmt.Sprintf("%+v", task.Template.GetContainer())) + task.GetTemplate().GetContainer()), fmt.Sprintf("%+v", task.GetTemplate().GetContainer())) } func TestSetDefaults_OptionalRequiredResources(t *testing.T) { @@ -4288,7 +4288,7 @@ func TestSetDefaults_OptionalRequiredResources(t *testing.T) { }, }, }, - task.Template.GetContainer()), fmt.Sprintf("%+v", task.Template.GetContainer())) + task.GetTemplate().GetContainer()), fmt.Sprintf("%+v", task.GetTemplate().GetContainer())) }) t.Run("respect non-required resources when defaults exist in config", func(t *testing.T) { @@ -4336,7 +4336,7 @@ func TestSetDefaults_OptionalRequiredResources(t *testing.T) { }, }, }, - task.Template.GetContainer()), fmt.Sprintf("%+v", task.Template.GetContainer())) + task.GetTemplate().GetContainer()), fmt.Sprintf("%+v", task.GetTemplate().GetContainer())) }) } @@ -4472,7 +4472,7 @@ func TestCreateSingleTaskExecution(t *testing.T) { }, input.ExecutionKey) assert.Equal(t, "task", input.LaunchEntity) assert.Equal(t, "UNDEFINED", input.Phase) - assert.True(t, proto.Equal(taskIdentifier, spec.LaunchPlan)) + assert.True(t, proto.Equal(taskIdentifier, spec.GetLaunchPlan())) return nil }) @@ -4583,10 +4583,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) { request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { // two requests will be made, one with empty domain and one with filled in domain assert.Contains(t, []managerInterfaces.ResourceRequest{{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, - }, {Project: workflowIdentifier.Project, + }, {Project: workflowIdentifier.GetProject(), Domain: "", ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG}, }, request) @@ -4631,8 +4631,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) { t.Run("request with full config", func(t *testing.T) { request := 
&admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ Labels: &admin.Labels{Values: requestLabels}, Annotations: &admin.Annotations{Values: requestAnnotations}, @@ -4656,20 +4656,20 @@ func TestGetExecutionConfigOverrides(t *testing.T) { ctx := identityContext.WithContext(context.Background()) execConfig, err := executionManager.getExecutionConfig(ctx, request, nil) assert.NoError(t, err) - assert.Equal(t, requestMaxParallelism, execConfig.MaxParallelism) - assert.Equal(t, requestK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) - assert.Equal(t, requestInterruptible, execConfig.Interruptible.Value) - assert.Equal(t, requestOverwriteCache, execConfig.OverwriteCache) - assert.Equal(t, requestOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) - assert.Equal(t, requestLabels, execConfig.GetLabels().Values) - assert.Equal(t, requestAnnotations, execConfig.GetAnnotations().Values) + assert.Equal(t, requestMaxParallelism, execConfig.GetMaxParallelism()) + assert.Equal(t, requestK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) + assert.Equal(t, requestInterruptible, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, requestOverwriteCache, execConfig.GetOverwriteCache()) + assert.Equal(t, requestOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix()) + assert.Equal(t, requestLabels, execConfig.GetLabels().GetValues()) + assert.Equal(t, requestAnnotations, execConfig.GetAnnotations().GetValues()) assert.Equal(t, "yeee", execConfig.GetSecurityContext().GetRunAs().GetExecutionIdentity()) - assert.Equal(t, requestEnvironmentVariables, execConfig.GetEnvs().Values) + assert.Equal(t, requestEnvironmentVariables, execConfig.GetEnvs().GetValues()) }) t.Run("request with partial config", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ Labels: &admin.Labels{Values: requestLabels}, RawOutputDataConfig: &admin.RawOutputDataConfig{ @@ -4697,19 +4697,19 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, requestMaxParallelism, execConfig.MaxParallelism) - assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value) - assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache) - assert.True(t, proto.Equal(launchPlan.Spec.SecurityContext, execConfig.SecurityContext)) - assert.True(t, proto.Equal(launchPlan.Spec.Annotations, execConfig.Annotations)) - assert.Equal(t, requestOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) - assert.Equal(t, requestLabels, execConfig.GetLabels().Values) - assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().Values) + assert.Equal(t, requestMaxParallelism, execConfig.GetMaxParallelism()) + assert.Equal(t, launchPlanInterruptible, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, launchPlanOverwriteCache, execConfig.GetOverwriteCache()) + assert.True(t, proto.Equal(launchPlan.GetSpec().GetSecurityContext(), execConfig.GetSecurityContext())) + assert.True(t, 
proto.Equal(launchPlan.GetSpec().GetAnnotations(), execConfig.GetAnnotations())) + assert.Equal(t, requestOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix()) + assert.Equal(t, requestLabels, execConfig.GetLabels().GetValues()) + assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().GetValues()) }) t.Run("request with empty security context", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ SecurityContext: &core.SecurityContext{ RunAs: &core.Identity{ @@ -4737,18 +4737,18 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism) - assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value) - assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache) - assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) - assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) - assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values) - assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().Values) + assert.Equal(t, launchPlanMaxParallelism, execConfig.GetMaxParallelism()) + assert.Equal(t, launchPlanInterruptible, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, launchPlanOverwriteCache, execConfig.GetOverwriteCache()) + assert.Equal(t, launchPlanK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) + assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix()) + assert.Equal(t, launchPlanLabels, execConfig.GetLabels().GetValues()) + assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().GetValues()) }) t.Run("request with no config", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } launchPlan := &admin.LaunchPlan{ @@ -4771,19 +4771,19 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism) - assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value) - assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache) - assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) - assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) - assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values) - assert.Equal(t, launchPlanAnnotations, execConfig.GetAnnotations().Values) - assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().Values) + assert.Equal(t, launchPlanMaxParallelism, execConfig.GetMaxParallelism()) + assert.Equal(t, launchPlanInterruptible, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, launchPlanOverwriteCache, execConfig.GetOverwriteCache()) + assert.Equal(t, launchPlanK8sServiceAccount, 
execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) + assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix()) + assert.Equal(t, launchPlanLabels, execConfig.GetLabels().GetValues()) + assert.Equal(t, launchPlanAnnotations, execConfig.GetAnnotations().GetValues()) + assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().GetValues()) }) t.Run("launchplan with partial config", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } launchPlan := &admin.LaunchPlan{ @@ -4803,18 +4803,18 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism) - assert.Equal(t, rmInterruptible, execConfig.Interruptible.Value) - assert.Equal(t, rmOverwriteCache, execConfig.OverwriteCache) - assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) - assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) - assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values) - assert.Equal(t, launchPlanAnnotations, execConfig.GetAnnotations().Values) + assert.Equal(t, launchPlanMaxParallelism, execConfig.GetMaxParallelism()) + assert.Equal(t, rmInterruptible, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, rmOverwriteCache, execConfig.GetOverwriteCache()) + assert.Equal(t, launchPlanK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) + assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix()) + assert.Equal(t, launchPlanLabels, execConfig.GetLabels().GetValues()) + assert.Equal(t, launchPlanAnnotations, execConfig.GetAnnotations().GetValues()) }) t.Run("launchplan with no config", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } launchPlan := &admin.LaunchPlan{ @@ -4822,23 +4822,23 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, rmMaxParallelism, execConfig.MaxParallelism) - assert.Equal(t, rmInterruptible, execConfig.Interruptible.Value) - assert.Equal(t, rmOverwriteCache, execConfig.OverwriteCache) - assert.Equal(t, rmK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) - assert.Equal(t, rmOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) - assert.Equal(t, rmLabels, execConfig.GetLabels().Values) - assert.Equal(t, rmAnnotations, execConfig.GetAnnotations().Values) + assert.Equal(t, rmMaxParallelism, execConfig.GetMaxParallelism()) + assert.Equal(t, rmInterruptible, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, rmOverwriteCache, execConfig.GetOverwriteCache()) + assert.Equal(t, rmK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) + assert.Equal(t, rmOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix()) + 
assert.Equal(t, rmLabels, execConfig.GetLabels().GetValues()) + assert.Equal(t, rmAnnotations, execConfig.GetAnnotations().GetValues()) assert.Nil(t, execConfig.GetEnvs()) }) t.Run("matchable resource partial config", func(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.Contains(t, []managerInterfaces.ResourceRequest{{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, - }, {Project: workflowIdentifier.Project, + }, {Project: workflowIdentifier.GetProject(), Domain: "", ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG}, }, request) @@ -4860,8 +4860,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) { }, nil } request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } launchPlan := &admin.LaunchPlan{ @@ -4869,23 +4869,23 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, rmMaxParallelism, execConfig.MaxParallelism) + assert.Equal(t, rmMaxParallelism, execConfig.GetMaxParallelism()) assert.Nil(t, execConfig.GetInterruptible()) - assert.False(t, execConfig.OverwriteCache) - assert.Equal(t, rmK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.False(t, execConfig.GetOverwriteCache()) + assert.Equal(t, rmK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) - assert.Equal(t, rmAnnotations, execConfig.GetAnnotations().Values) + assert.Equal(t, rmAnnotations, execConfig.GetAnnotations().GetValues()) assert.Nil(t, execConfig.GetEnvs()) }) t.Run("matchable resource with no config", func(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.Contains(t, []managerInterfaces.ResourceRequest{{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, - }, {Project: workflowIdentifier.Project, + }, {Project: workflowIdentifier.GetProject(), Domain: "", ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG}, }, request) @@ -4898,8 +4898,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) { }, nil } request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } launchPlan := &admin.LaunchPlan{ @@ -4907,10 +4907,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) assert.Nil(t, execConfig.GetInterruptible()) - 
assert.False(t, execConfig.OverwriteCache) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.False(t, execConfig.GetOverwriteCache()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) @@ -4920,10 +4920,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.Contains(t, []managerInterfaces.ResourceRequest{{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, - }, {Project: workflowIdentifier.Project, + }, {Project: workflowIdentifier.GetProject(), Domain: "", ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG}, }, request) @@ -4937,8 +4937,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) { }, nil } request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } launchPlan := &admin.LaunchPlan{ @@ -4950,10 +4950,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) assert.Nil(t, execConfig.GetInterruptible()) - assert.False(t, execConfig.OverwriteCache) - assert.Equal(t, deprecatedLaunchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.False(t, execConfig.GetOverwriteCache()) + assert.Equal(t, deprecatedLaunchPlanK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) @@ -4963,11 +4963,11 @@ func TestGetExecutionConfigOverrides(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.Contains(t, []managerInterfaces.ResourceRequest{{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, - Workflow: workflowIdentifier.Name, - }, {Project: workflowIdentifier.Project, + Workflow: workflowIdentifier.GetName(), + }, {Project: workflowIdentifier.GetProject(), Domain: "", Workflow: "", ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG}, @@ -4991,23 +4991,23 @@ func TestGetExecutionConfigOverrides(t *testing.T) { }, nil } request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } launchPlan := &admin.LaunchPlan{ Spec: &admin.LaunchPlanSpec{ WorkflowId: &core.Identifier{ - Name: workflowIdentifier.Name, + Name: 
workflowIdentifier.GetName(), }, }, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, int32(300), execConfig.MaxParallelism) - assert.True(t, execConfig.Interruptible.Value) - assert.True(t, execConfig.OverwriteCache) - assert.Equal(t, "workflowDefault", execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, int32(300), execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetInterruptible().GetValue()) + assert.True(t, execConfig.GetOverwriteCache()) + assert.Equal(t, "workflowDefault", execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) @@ -5017,18 +5017,18 @@ func TestGetExecutionConfigOverrides(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.Contains(t, []managerInterfaces.ResourceRequest{{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, - }, {Project: workflowIdentifier.Project, + }, {Project: workflowIdentifier.GetProject(), Domain: "", ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG}, }, request) return nil, fmt.Errorf("failed to fetch the resources") } request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } launchPlan := &admin.LaunchPlan{ @@ -5049,10 +5049,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.Contains(t, []managerInterfaces.ResourceRequest{{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, - }, {Project: workflowIdentifier.Project, + }, {Project: workflowIdentifier.GetProject(), Domain: "", ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG}, }, request) @@ -5070,8 +5070,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) { t.Run("request with interruptible override disabled", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ Interruptible: &wrappers.BoolValue{Value: false}, }, @@ -5079,17 +5079,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.False(t, execConfig.Interruptible.Value) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.False(t, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, defaultK8sServiceAccount, 
execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("request with interruptible override enabled", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ Interruptible: &wrappers.BoolValue{Value: true}, }, @@ -5097,33 +5097,33 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.Interruptible.Value) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("request with no interruptible override specified", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.Interruptible.Value) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("launch plan with interruptible override disabled", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } @@ -5135,17 +5135,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.False(t, execConfig.Interruptible.Value) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.False(t, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("launch plan with interruptible override enabled", func(t *testing.T) { request := 
&admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } @@ -5158,20 +5158,20 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.Interruptible.Value) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) - assert.Equal(t, 1, len(execConfig.Envs.Values)) - assert.Equal(t, "foo", execConfig.Envs.Values[0].Key) - assert.Equal(t, "bar", execConfig.Envs.Values[0].Value) + assert.Equal(t, 1, len(execConfig.GetEnvs().GetValues())) + assert.Equal(t, "foo", execConfig.GetEnvs().GetValues()[0].GetKey()) + assert.Equal(t, "bar", execConfig.GetEnvs().GetValues()[0].GetValue()) }) t.Run("launch plan with no interruptible override specified", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } @@ -5181,17 +5181,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.Interruptible.Value) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("request and launch plan with different interruptible overrides", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ Interruptible: &wrappers.BoolValue{Value: true}, }, @@ -5205,17 +5205,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.Interruptible.Value) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetInterruptible().GetValue()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) 
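
Editorial aside, not part of the patch: the subtests above and below assert a fixed precedence when getExecutionConfig merges settings — values in the execution create request win over the launch plan spec, which wins over matchable-resource overrides, which win over application-config defaults. A rough sketch of that fallback order under a hypothetical helper name, treating zero as "unset" purely for illustration:

// Hypothetical helper, sketching the precedence the subtests assert for
// MaxParallelism: request > launch plan > matchable resource > platform default.
func resolveMaxParallelism(request, launchPlan, matchableResource, platformDefault int32) int32 {
	for _, candidate := range []int32{request, launchPlan, matchableResource} {
		if candidate > 0 { // simplification: zero means "not set"
			return candidate
		}
	}
	return platformDefault
}
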
assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("request with skip cache override enabled", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ OverwriteCache: true, }, @@ -5223,33 +5223,33 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.OverwriteCache) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetOverwriteCache()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("request with no skip cache override specified", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.OverwriteCache) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetOverwriteCache()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("launch plan with skip cache override enabled", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } @@ -5261,17 +5261,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.OverwriteCache) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetOverwriteCache()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("launch plan with no skip cache override specified", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: 
&admin.ExecutionSpec{}, } @@ -5281,17 +5281,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.OverwriteCache) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetOverwriteCache()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) t.Run("request and launch plan with different skip cache overrides", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ OverwriteCache: true, }, @@ -5305,9 +5305,9 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) - assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) - assert.True(t, execConfig.OverwriteCache) - assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetOverwriteCache()) + assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) @@ -5316,13 +5316,13 @@ func TestGetExecutionConfigOverrides(t *testing.T) { t.Run("test pick up security context from admin system config", func(t *testing.T) { executionManager.config.ApplicationConfiguration().GetTopLevelConfig().K8SServiceAccount = "flyte-test" request := &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) assert.NoError(t, err) - assert.Equal(t, "flyte-test", execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Equal(t, "flyte-test", execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount()) executionManager.config.ApplicationConfiguration().GetTopLevelConfig().K8SServiceAccount = defaultK8sServiceAccount }) }) @@ -5333,10 +5333,10 @@ func TestGetExecutionConfig(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.Contains(t, []managerInterfaces.ResourceRequest{{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, - }, {Project: workflowIdentifier.Project, + }, {Project: workflowIdentifier.GetProject(), Domain: "", ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG}, }, request) @@ -5358,13 +5358,13 @@ func 
TestGetExecutionConfig(t *testing.T) { config: applicationConfig, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, }, nil) assert.NoError(t, err) - assert.Equal(t, execConfig.MaxParallelism, int32(100)) - assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, execConfig.GetMaxParallelism(), int32(100)) + assert.True(t, execConfig.GetOverwriteCache()) } func TestGetExecutionConfig_Spec(t *testing.T) { @@ -5379,8 +5379,8 @@ func TestGetExecutionConfig_Spec(t *testing.T) { config: applicationConfig, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ MaxParallelism: 100, OverwriteCache: true, @@ -5392,12 +5392,12 @@ func TestGetExecutionConfig_Spec(t *testing.T) { }, }) assert.NoError(t, err) - assert.Equal(t, int32(100), execConfig.MaxParallelism) - assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, int32(100), execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetOverwriteCache()) execConfig, err = executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, }, &admin.LaunchPlan{ Spec: &admin.LaunchPlanSpec{ @@ -5406,8 +5406,8 @@ func TestGetExecutionConfig_Spec(t *testing.T) { }, }) assert.NoError(t, err) - assert.Equal(t, int32(50), execConfig.MaxParallelism) - assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, int32(50), execConfig.GetMaxParallelism()) + assert.True(t, execConfig.GetOverwriteCache()) resourceManager = managerMocks.MockResourceManager{} resourceManager.GetResourceFunc = func(ctx context.Context, @@ -5422,15 +5422,15 @@ func TestGetExecutionConfig_Spec(t *testing.T) { executionManager.config.ApplicationConfiguration().GetTopLevelConfig().OverwriteCache = true execConfig, err = executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, }, &admin.LaunchPlan{ Spec: &admin.LaunchPlanSpec{}, }) assert.NoError(t, err) - assert.Equal(t, execConfig.MaxParallelism, int32(25)) - assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, execConfig.GetMaxParallelism(), int32(25)) + assert.True(t, execConfig.GetOverwriteCache()) } func TestGetClusterAssignment(t *testing.T) { @@ -5439,8 +5439,8 @@ func TestGetClusterAssignment(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.EqualValues(t, request, managerInterfaces.ResourceRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_CLUSTER_ASSIGNMENT, }) return &managerInterfaces.ResourceResponse{ @@ -5457,8 +5457,8 @@ func 
TestGetClusterAssignment(t *testing.T) { } t.Run("value from db", func(t *testing.T) { ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, }) assert.NoError(t, err) @@ -5481,8 +5481,8 @@ func TestGetClusterAssignment(t *testing.T) { } ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, }) assert.NoError(t, err) @@ -5491,8 +5491,8 @@ func TestGetClusterAssignment(t *testing.T) { t.Run("value from request matches value from config", func(t *testing.T) { reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"} ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ ClusterAssignment: &reqClusterAssignment, }, @@ -5510,8 +5510,8 @@ func TestGetClusterAssignment(t *testing.T) { reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"} ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ ClusterAssignment: &reqClusterAssignment, }, @@ -5536,8 +5536,8 @@ func TestGetClusterAssignment(t *testing.T) { reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"} ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ ClusterAssignment: &reqClusterAssignment, }, @@ -5548,8 +5548,8 @@ func TestGetClusterAssignment(t *testing.T) { t.Run("value from request doesn't match value from config", func(t *testing.T) { reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "swimming-pool"} _, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{ ClusterAssignment: &reqClusterAssignment, }, @@ -5564,8 +5564,8 @@ func TestGetClusterAssignment(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.EqualValues(t, request, managerInterfaces.ResourceRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), ResourceType: admin.MatchableResource_CLUSTER_ASSIGNMENT, }) return &managerInterfaces.ResourceResponse{ @@ -5578,8 +5578,8 @@ func TestGetClusterAssignment(t *testing.T) { } _, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ - 
Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), Spec: &admin.ExecutionSpec{}, }) @@ -5622,8 +5622,8 @@ func TestResolvePermissions(t *testing.T) { } authRole := resolveAuthRole(execRequest, lp) sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole) - assert.Equal(t, assumableIamRole, authRole.AssumableIamRole) - assert.Equal(t, k8sServiceAccount, authRole.KubernetesServiceAccount) + assert.Equal(t, assumableIamRole, authRole.GetAssumableIamRole()) + assert.Equal(t, k8sServiceAccount, authRole.GetKubernetesServiceAccount()) assert.Equal(t, &core.SecurityContext{ RunAs: &core.Identity{ IamRole: assumableIamRole, @@ -5659,10 +5659,10 @@ func TestResolvePermissions(t *testing.T) { }, } sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole) - assert.Equal(t, "", authRole.AssumableIamRole) - assert.Equal(t, "", authRole.KubernetesServiceAccount) - assert.Equal(t, assumableIamRoleSc, sc.RunAs.IamRole) - assert.Equal(t, k8sServiceAccountSc, sc.RunAs.K8SServiceAccount) + assert.Equal(t, "", authRole.GetAssumableIamRole()) + assert.Equal(t, "", authRole.GetKubernetesServiceAccount()) + assert.Equal(t, assumableIamRoleSc, sc.GetRunAs().GetIamRole()) + assert.Equal(t, k8sServiceAccountSc, sc.GetRunAs().GetK8SServiceAccount()) }) t.Run("prefer lp auth role over auth", func(t *testing.T) { execRequest := &admin.ExecutionCreateRequest{ @@ -5685,8 +5685,8 @@ func TestResolvePermissions(t *testing.T) { RunAs: &core.Identity{}, } sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole) - assert.Equal(t, assumableIamRole, authRole.AssumableIamRole) - assert.Equal(t, k8sServiceAccount, authRole.KubernetesServiceAccount) + assert.Equal(t, assumableIamRole, authRole.GetAssumableIamRole()) + assert.Equal(t, k8sServiceAccount, authRole.GetKubernetesServiceAccount()) assert.Equal(t, &core.SecurityContext{ RunAs: &core.Identity{ IamRole: assumableIamRole, @@ -5731,10 +5731,10 @@ func TestResolvePermissions(t *testing.T) { }, } sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole) - assert.Equal(t, assumableIamRole, authRole.AssumableIamRole) - assert.Equal(t, k8sServiceAccount, authRole.KubernetesServiceAccount) - assert.Equal(t, assumableIamRoleSc, sc.RunAs.IamRole) - assert.Equal(t, k8sServiceAccountSc, sc.RunAs.K8SServiceAccount) + assert.Equal(t, assumableIamRole, authRole.GetAssumableIamRole()) + assert.Equal(t, k8sServiceAccount, authRole.GetKubernetesServiceAccount()) + assert.Equal(t, assumableIamRoleSc, sc.GetRunAs().GetIamRole()) + assert.Equal(t, k8sServiceAccountSc, sc.GetRunAs().GetK8SServiceAccount()) }) t.Run("prefer lp auth over role", func(t *testing.T) { execRequest := &admin.ExecutionCreateRequest{ @@ -5757,8 +5757,8 @@ func TestResolvePermissions(t *testing.T) { }, } sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole) - assert.Equal(t, assumableIamRole, authRole.AssumableIamRole) - assert.Equal(t, k8sServiceAccount, authRole.KubernetesServiceAccount) + assert.Equal(t, assumableIamRole, authRole.GetAssumableIamRole()) + assert.Equal(t, k8sServiceAccount, authRole.GetKubernetesServiceAccount()) assert.Equal(t, &core.SecurityContext{ RunAs: &core.Identity{ IamRole: assumableIamRole, @@ -5778,8 +5778,8 @@ func TestResolvePermissions(t *testing.T) { Role: "old role", }, }) - assert.Equal(t, assumableIamRoleLp, authRole.AssumableIamRole) - assert.Equal(t, k8sServiceAccountLp, authRole.KubernetesServiceAccount) + 
assert.Equal(t, assumableIamRoleLp, authRole.GetAssumableIamRole()) + assert.Equal(t, k8sServiceAccountLp, authRole.GetKubernetesServiceAccount()) }) } @@ -5859,7 +5859,7 @@ func TestQueryTemplate(t *testing.T) { }, } - filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.Literals) + filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.GetLiterals()) assert.NoError(t, err) assert.True(t, proto.Equal(q, filledQuery)) }) @@ -5881,11 +5881,11 @@ func TestQueryTemplate(t *testing.T) { }, } - filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.Literals) + filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.GetLiterals()) assert.NoError(t, err) - staticTime := filledQuery.GetArtifactId().Partitions.Value["partition1"].GetStaticValue() + staticTime := filledQuery.GetArtifactId().GetPartitions().GetValue()["partition1"].GetStaticValue() assert.Equal(t, "2063-04-05", staticTime) - assert.Equal(t, int64(2942956800), filledQuery.GetArtifactId().TimePartition.Value.GetTimeValue().Seconds) + assert.Equal(t, int64(2942956800), filledQuery.GetArtifactId().GetTimePartition().GetValue().GetTimeValue().GetSeconds()) }) t.Run("something missing", func(t *testing.T) { @@ -5905,7 +5905,7 @@ func TestQueryTemplate(t *testing.T) { }, } - _, err := m.fillInTemplateArgs(ctx, q, otherInputs.Literals) + _, err := m.fillInTemplateArgs(ctx, q, otherInputs.GetLiterals()) assert.Error(t, err) }) } diff --git a/flyteadmin/pkg/manager/impl/executions/quality_of_service.go b/flyteadmin/pkg/manager/impl/executions/quality_of_service.go index a96d99d3d6..c2b6f8d3da 100644 --- a/flyteadmin/pkg/manager/impl/executions/quality_of_service.go +++ b/flyteadmin/pkg/manager/impl/executions/quality_of_service.go @@ -37,9 +37,9 @@ type qualityOfServiceAllocator struct { func (q qualityOfServiceAllocator) getQualityOfServiceFromDb(ctx context.Context, workflowIdentifier *core.Identifier) ( *core.QualityOfService, error) { resource, err := q.resourceManager.GetResource(ctx, interfaces.ResourceRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, - Workflow: workflowIdentifier.Name, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), + Workflow: workflowIdentifier.GetName(), ResourceType: admin.MatchableResource_QUALITY_OF_SERVICE_SPECIFICATION, }) if err != nil { @@ -79,63 +79,62 @@ QualityOfService spec to apply. This method handles resolving the QualityOfService for an execution given the above rules. 
 */
 func (q qualityOfServiceAllocator) GetQualityOfService(ctx context.Context, input GetQualityOfServiceInput) (QualityOfServiceSpec, error) {
-	workflowIdentifier := input.Workflow.Id
+	workflowIdentifier := input.Workflow.GetId()
 	var qualityOfServiceTier core.QualityOfService_Tier
-	if input.ExecutionCreateRequest.Spec.QualityOfService != nil {
-		if input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec() != nil {
+	if input.ExecutionCreateRequest.GetSpec().GetQualityOfService() != nil {
+		if input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec() != nil {
 			logger.Debugf(ctx, "Determining quality of service from execution spec for [%s/%s/%s]",
-				input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
-				input.ExecutionCreateRequest.Name)
-			duration, err := ptypes.Duration(input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget)
+				input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+				input.ExecutionCreateRequest.GetName())
+			duration, err := ptypes.Duration(input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget())
 			if err != nil {
 				return QualityOfServiceSpec{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
 					"Invalid custom quality of service set in create execution request [%s/%s/%s], failed to parse duration [%v] with: %v",
-					input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
-					input.ExecutionCreateRequest.Name,
-					input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget, err)
+					input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+					input.ExecutionCreateRequest.GetName(),
+					input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget(), err)
 			}
 			return QualityOfServiceSpec{
 				QueuingBudget: duration,
 			}, nil
 		}
-		qualityOfServiceTier = input.ExecutionCreateRequest.Spec.QualityOfService.GetTier()
-	} else if input.LaunchPlan.Spec.QualityOfService != nil {
-		if input.LaunchPlan.Spec.QualityOfService.GetSpec() != nil {
+		qualityOfServiceTier = input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetTier()
+	} else if input.LaunchPlan.GetSpec().GetQualityOfService() != nil {
+		if input.LaunchPlan.GetSpec().GetQualityOfService().GetSpec() != nil {
 			logger.Debugf(ctx, "Determining quality of service from launch plan spec for [%s/%s/%s]",
-				input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
-				input.ExecutionCreateRequest.Name)
-			duration, err := ptypes.Duration(input.LaunchPlan.Spec.QualityOfService.GetSpec().QueueingBudget)
+				input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+				input.ExecutionCreateRequest.GetName())
+			duration, err := ptypes.Duration(input.LaunchPlan.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget())
 			if err != nil {
 				return QualityOfServiceSpec{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
 					"Invalid custom quality of service set in launch plan [%v], failed to parse duration [%v] with: %v",
-					input.LaunchPlan.Id,
-					input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget, err)
+					input.LaunchPlan.GetId(),
+					input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget(), err)
 			}
 			return QualityOfServiceSpec{
 				QueuingBudget: duration,
 			}, nil
 		}
-		qualityOfServiceTier = input.LaunchPlan.Spec.QualityOfService.GetTier()
-	} else if input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata != nil &&
-		input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService != nil {
+		qualityOfServiceTier = input.LaunchPlan.GetSpec().GetQualityOfService().GetTier()
+	} else if input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata() != nil &&
+		input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService() != nil {
 		logger.Debugf(ctx, "Determining quality of service from workflow spec for [%s/%s/%s]",
-			input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
-			input.ExecutionCreateRequest.Name)
-		if input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService.GetSpec() != nil {
-			duration, err := ptypes.Duration(input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService.
-				GetSpec().QueueingBudget)
+			input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+			input.ExecutionCreateRequest.GetName())
+		if input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService().GetSpec() != nil {
+			duration, err := ptypes.Duration(input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService().GetSpec().GetQueueingBudget())
 			if err != nil {
 				return QualityOfServiceSpec{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
 					"Invalid custom quality of service set in workflow [%v], failed to parse duration [%v] with: %v",
 					workflowIdentifier,
-					input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget, err)
+					input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget(), err)
 			}
 			return QualityOfServiceSpec{
 				QueuingBudget: duration,
 			}, nil
 		}
-		qualityOfServiceTier = input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService.GetTier()
+		qualityOfServiceTier = input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService().GetTier()
 	}
 	// If nothing in the hierarchy of registrable entities has set the quality of service,
@@ -147,23 +146,23 @@ func (q qualityOfServiceAllocator) GetQualityOfService(ctx context.Context, inpu
 	}
 	if qualityOfService != nil && qualityOfService.GetSpec() != nil {
 		logger.Debugf(ctx, "Determining quality of service from spec database override for [%s/%s/%s]",
-			input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
-			input.ExecutionCreateRequest.Name)
-		duration, err := ptypes.Duration(qualityOfService.GetSpec().QueueingBudget)
+			input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+			input.ExecutionCreateRequest.GetName())
+		duration, err := ptypes.Duration(qualityOfService.GetSpec().GetQueueingBudget())
 		if err != nil {
 			return QualityOfServiceSpec{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
 				"Invalid custom quality of service set in overridable matching attributes for [%v],"+
 					"failed to parse duration [%v] with: %v", workflowIdentifier,
-				input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget, err)
+				input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget(), err)
 		}
 		return QualityOfServiceSpec{
 			QueuingBudget: duration,
 		}, nil
 	} else if qualityOfService != nil && qualityOfService.GetTier() != core.QualityOfService_UNDEFINED {
 		logger.Debugf(ctx, "Determining quality of service tier from database override for [%s/%s/%s]",
-			input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
-			input.ExecutionCreateRequest.Name)
-		qualityOfServiceTier = input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService.GetTier()
+			input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+			input.ExecutionCreateRequest.GetName())
+		qualityOfServiceTier = input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService().GetTier()
 	}
 }
@@ -171,10 +170,10 @@ func (q qualityOfServiceAllocator) GetQualityOfService(ctx context.Context, inpu
 	// set, use the default values from the admin application config.
 	if qualityOfServiceTier == core.QualityOfService_UNDEFINED {
 		logger.Debugf(ctx, "Determining quality of service tier from application config override for [%s/%s/%s]",
-			input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
-			input.ExecutionCreateRequest.Name)
+			input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+			input.ExecutionCreateRequest.GetName())
 		var ok bool
-		qualityOfServiceTier, ok = q.config.QualityOfServiceConfiguration().GetDefaultTiers()[input.ExecutionCreateRequest.Domain]
+		qualityOfServiceTier, ok = q.config.QualityOfServiceConfiguration().GetDefaultTiers()[input.ExecutionCreateRequest.GetDomain()]
 		if !ok {
 			// No queueing budget to set when no default is specified
 			return QualityOfServiceSpec{}, nil
@@ -186,10 +185,10 @@ func (q qualityOfServiceAllocator) GetQualityOfService(ctx context.Context, inpu
 		return QualityOfServiceSpec{}, nil
 	}
 	logger.Debugf(ctx, "Determining quality of service spec from application config override for [%s/%s/%s] with tier [%v]",
-		input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
-		input.ExecutionCreateRequest.Name, qualityOfServiceTier)
+		input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+		input.ExecutionCreateRequest.GetName(), qualityOfServiceTier)
 	// Config values should always be vetted so there's no need to check the error from conversion.
- duration, _ := ptypes.Duration(executionValues.QueueingBudget) + duration, _ := ptypes.Duration(executionValues.GetQueueingBudget()) return QualityOfServiceSpec{ QueuingBudget: duration, diff --git a/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go b/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go index 41a04ec2bc..0ad76cd3c7 100644 --- a/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go +++ b/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go @@ -63,9 +63,9 @@ func addGetResourceFunc(t *testing.T, resourceManager interfaces.ResourceInterfa resourceManager.(*managerMocks.MockResourceManager).GetResourceFunc = func(ctx context.Context, request interfaces.ResourceRequest) (*interfaces.ResourceResponse, error) { assert.EqualValues(t, request, interfaces.ResourceRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, - Workflow: workflowIdentifier.Name, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), + Workflow: workflowIdentifier.GetName(), ResourceType: admin.MatchableResource_QUALITY_OF_SERVICE_SPECIFICATION, }) return &interfaces.ResourceResponse{ diff --git a/flyteadmin/pkg/manager/impl/executions/queues.go b/flyteadmin/pkg/manager/impl/executions/queues.go index 90a5951a33..2064626717 100644 --- a/flyteadmin/pkg/manager/impl/executions/queues.go +++ b/flyteadmin/pkg/manager/impl/executions/queues.go @@ -59,9 +59,9 @@ func (q *queueAllocatorImpl) GetQueue(ctx context.Context, identifier *core.Iden q.refreshExecutionQueues(executionQueues) resource, err := q.resourceManager.GetResource(ctx, interfaces.ResourceRequest{ - Project: identifier.Project, - Domain: identifier.Domain, - Workflow: identifier.Name, + Project: identifier.GetProject(), + Domain: identifier.GetDomain(), + Workflow: identifier.GetName(), ResourceType: admin.MatchableResource_EXECUTION_QUEUE, }) @@ -71,7 +71,7 @@ func (q *queueAllocatorImpl) GetQueue(ctx context.Context, identifier *core.Iden } if resource != nil && resource.Attributes != nil && resource.Attributes.GetExecutionQueueAttributes() != nil { - for _, tag := range resource.Attributes.GetExecutionQueueAttributes().Tags { + for _, tag := range resource.Attributes.GetExecutionQueueAttributes().GetTags() { matches, ok := q.queueConfigMap[tag] if !ok { continue @@ -84,7 +84,7 @@ func (q *queueAllocatorImpl) GetQueue(ctx context.Context, identifier *core.Iden var defaultTags []string // If we've made it this far, check to see if a domain-specific default workflow config exists for this particular domain. 
for _, workflowConfig := range q.config.QueueConfiguration().GetWorkflowConfigs() { - if workflowConfig.Domain == identifier.Domain { + if workflowConfig.Domain == identifier.GetDomain() { tags = workflowConfig.Tags } else if len(workflowConfig.Domain) == 0 { defaultTags = workflowConfig.Tags diff --git a/flyteadmin/pkg/manager/impl/launch_plan_manager.go b/flyteadmin/pkg/manager/impl/launch_plan_manager.go index 74f0571f86..b1d0d8d56d 100644 --- a/flyteadmin/pkg/manager/impl/launch_plan_manager.go +++ b/flyteadmin/pkg/manager/impl/launch_plan_manager.go @@ -41,13 +41,13 @@ type LaunchPlanManager struct { } func getLaunchPlanContext(ctx context.Context, identifier *core.Identifier) context.Context { - ctx = contextutils.WithProjectDomain(ctx, identifier.Project, identifier.Domain) - return contextutils.WithLaunchPlanID(ctx, identifier.Name) + ctx = contextutils.WithProjectDomain(ctx, identifier.GetProject(), identifier.GetDomain()) + return contextutils.WithLaunchPlanID(ctx, identifier.GetName()) } func (m *LaunchPlanManager) getNamedEntityContext(ctx context.Context, identifier *admin.NamedEntityIdentifier) context.Context { - ctx = contextutils.WithProjectDomain(ctx, identifier.Project, identifier.Domain) - return contextutils.WithLaunchPlanID(ctx, identifier.Name) + ctx = contextutils.WithProjectDomain(ctx, identifier.GetProject(), identifier.GetDomain()) + return contextutils.WithLaunchPlanID(ctx, identifier.GetName()) } func (m *LaunchPlanManager) CreateLaunchPlan( @@ -57,35 +57,35 @@ func (m *LaunchPlanManager) CreateLaunchPlan( logger.Debugf(ctx, "Failed to validate provided workflow ID for CreateLaunchPlan with err: %v", err) return nil, err } - workflowModel, err := util.GetWorkflowModel(ctx, m.db, request.Spec.WorkflowId) + workflowModel, err := util.GetWorkflowModel(ctx, m.db, request.GetSpec().GetWorkflowId()) if err != nil { logger.Debugf(ctx, "Failed to get workflow with id [%+v] for CreateLaunchPlan with id [%+v] with err %v", - request.Spec.WorkflowId, request.Id) + request.GetSpec().GetWorkflowId(), request.GetId()) return nil, err } var workflowInterface core.TypedInterface - if workflowModel.TypedInterface != nil && len(workflowModel.TypedInterface) > 0 { + if len(workflowModel.TypedInterface) > 0 { err = proto.Unmarshal(workflowModel.TypedInterface, &workflowInterface) if err != nil { logger.Errorf(ctx, "Failed to unmarshal TypedInterface for workflow [%+v] with err: %v", - request.Spec.WorkflowId, err) + request.GetSpec().GetWorkflowId(), err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal workflow inputs") } } if err := validation.ValidateLaunchPlan(ctx, request, m.db, m.config.ApplicationConfiguration(), &workflowInterface); err != nil { - logger.Debugf(ctx, "could not create launch plan: %+v, request failed validation with err: %v", request.Id, err) + logger.Debugf(ctx, "could not create launch plan: %+v, request failed validation with err: %v", request.GetId(), err) return nil, err } - ctx = getLaunchPlanContext(ctx, request.Id) - launchPlan := transformers.CreateLaunchPlan(request, workflowInterface.Outputs) + ctx = getLaunchPlanContext(ctx, request.GetId()) + launchPlan := transformers.CreateLaunchPlan(request, workflowInterface.GetOutputs()) launchPlanDigest, err := util.GetLaunchPlanDigest(ctx, launchPlan) if err != nil { - logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.Id, err) + logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.GetId(), err) 
 		return nil, err
 	}
-	existingLaunchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Id)
+	existingLaunchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.GetId())
 	if err == nil {
 		if bytes.Equal(existingLaunchPlanModel.Digest, launchPlanDigest) {
 			return nil, errors.NewLaunchPlanExistsIdenticalStructureError(ctx, request)
@@ -96,7 +96,7 @@ func (m *LaunchPlanManager) CreateLaunchPlan(
 			return nil, transformerErr
 		}
 		// A launch plan exists with different structure
-		return nil, errors.NewLaunchPlanExistsDifferentStructureError(ctx, request, existingLaunchPlan.Spec, launchPlan.Spec)
+		return nil, errors.NewLaunchPlanExistsDifferentStructureError(ctx, request, existingLaunchPlan.GetSpec(), launchPlan.GetSpec())
 	}
 	launchPlanModel, err :=
@@ -104,12 +104,12 @@ func (m *LaunchPlanManager) CreateLaunchPlan(
 	if err != nil {
 		logger.Errorf(ctx, "Failed to transform launch plan model [%+v], and workflow outputs [%+v] with err: %v",
-			request, workflowInterface.Outputs, err)
+			request, workflowInterface.GetOutputs(), err)
 		return nil, err
 	}
 	err = m.db.LaunchPlanRepo().Create(ctx, launchPlanModel)
 	if err != nil {
-		logger.Errorf(ctx, "Failed to save launch plan model %+v with err: %v", request.Id, err)
+		logger.Errorf(ctx, "Failed to save launch plan model %+v with err: %v", request.GetId(), err)
 		return nil, err
 	}
 	m.metrics.SpecSizeBytes.Observe(float64(len(launchPlanModel.Spec)))
@@ -143,7 +143,7 @@ func isScheduleEmpty(launchPlanSpec *admin.LaunchPlanSpec) bool {
 	if schedule == nil {
 		return true
 	}
-	if schedule.GetCronSchedule() != nil && len(schedule.GetCronSchedule().Schedule) != 0 {
+	if schedule.GetCronSchedule() != nil && len(schedule.GetCronSchedule().GetSchedule()) != 0 {
 		return false
 	}
 	if len(schedule.GetCronExpression()) != 0 {
@@ -160,7 +160,7 @@ func (m *LaunchPlanManager) enableSchedule(ctx context.Context, launchPlanIdenti
 	addScheduleInput, err := m.scheduler.CreateScheduleInput(ctx,
 		m.config.ApplicationConfiguration().GetSchedulerConfig(), launchPlanIdentifier,
-		launchPlanSpec.EntityMetadata.Schedule)
+		launchPlanSpec.GetEntityMetadata().GetSchedule())
 	if err != nil {
 		return err
 	}
@@ -223,30 +223,30 @@ func (m *LaunchPlanManager) updateSchedules(
 func (m *LaunchPlanManager) disableLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) (
 	*admin.LaunchPlanUpdateResponse, error) {
-	if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil {
-		logger.Debugf(ctx, "can't disable launch plan [%+v] with invalid identifier: %v", request.Id, err)
+	if err := validation.ValidateIdentifier(request.GetId(), common.LaunchPlan); err != nil {
+		logger.Debugf(ctx, "can't disable launch plan [%+v] with invalid identifier: %v", request.GetId(), err)
 		return nil, err
 	}
-	launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Id)
+	launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.GetId())
 	if err != nil {
-		logger.Debugf(ctx, "couldn't find launch plan [%+v] to disable with err: %v", request.Id, err)
+		logger.Debugf(ctx, "couldn't find launch plan [%+v] to disable with err: %v", request.GetId(), err)
 		return nil, err
 	}
 	err = m.updateLaunchPlanModelState(&launchPlanModel, admin.LaunchPlanState_INACTIVE)
 	if err != nil {
-		logger.Debugf(ctx, "failed to disable launch plan [%+v] with err: %v", request.Id, err)
+		logger.Debugf(ctx, "failed to disable launch plan [%+v] with err: %v", request.GetId(), err)
 		return nil, err
 	}
 	var launchPlanSpec admin.LaunchPlanSpec
 	err = proto.Unmarshal(launchPlanModel.Spec, &launchPlanSpec)
 	if err != nil {
-		logger.Errorf(ctx, "failed to unmarshal launch plan spec when disabling schedule for %+v", request.Id)
+		logger.Errorf(ctx, "failed to unmarshal launch plan spec when disabling schedule for %+v", request.GetId())
 		return nil, errors.NewFlyteAdminErrorf(codes.Internal,
-			"failed to unmarshal launch plan spec when disabling schedule for %+v", request.Id)
+			"failed to unmarshal launch plan spec when disabling schedule for %+v", request.GetId())
 	}
-	if launchPlanSpec.EntityMetadata != nil && launchPlanSpec.EntityMetadata.Schedule != nil {
+	if launchPlanSpec.GetEntityMetadata() != nil && launchPlanSpec.GetEntityMetadata().GetSchedule() != nil {
 		err = m.disableSchedule(ctx, &core.Identifier{
 			Project: launchPlanModel.Project,
 			Domain: launchPlanModel.Domain,
@@ -259,23 +259,23 @@ func (m *LaunchPlanManager) disableLaunchPlan(ctx context.Context, request *admi
 	}
 	err = m.db.LaunchPlanRepo().Update(ctx, launchPlanModel)
 	if err != nil {
-		logger.Debugf(ctx, "Failed to update launchPlanModel with ID [%+v] with err %v", request.Id, err)
+		logger.Debugf(ctx, "Failed to update launchPlanModel with ID [%+v] with err %v", request.GetId(), err)
 		return nil, err
 	}
-	logger.Debugf(ctx, "disabled launch plan: [%+v]", request.Id)
+	logger.Debugf(ctx, "disabled launch plan: [%+v]", request.GetId())
 	return &admin.LaunchPlanUpdateResponse{}, nil
 }
 func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) (
 	*admin.LaunchPlanUpdateResponse, error) {
 	newlyActiveLaunchPlanModel, err := m.db.LaunchPlanRepo().Get(ctx, repoInterfaces.Identifier{
-		Project: request.Id.Project,
-		Domain: request.Id.Domain,
-		Name: request.Id.Name,
-		Version: request.Id.Version,
+		Project: request.GetId().GetProject(),
+		Domain: request.GetId().GetDomain(),
+		Name: request.GetId().GetName(),
+		Version: request.GetId().GetVersion(),
 	})
 	if err != nil {
-		logger.Debugf(ctx, "Failed to find launch plan to enable with id [%+v] and err %v", request.Id, err)
+		logger.Debugf(ctx, "Failed to find launch plan to enable with id [%+v] and err %v", request.GetId(), err)
 		return nil, err
 	}
 	// Set desired launch plan version to active:
@@ -298,13 +298,12 @@ func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin
 	// Not found is fine, there isn't always a guaranteed active launch plan model.
if err.(errors.FlyteAdminError).Code() != codes.NotFound { logger.Infof(ctx, "Failed to search for an active launch plan model with project: %s, domain: %s, name: %s and err %v", - request.Id.Project, request.Id.Domain, request.Id.Name, err) + request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName(), err) return nil, err } logger.Debugf(ctx, "No active launch plan model found to disable with project: %s, domain: %s, name: %s", - request.Id.Project, request.Id.Domain, request.Id.Name) - } else if formerlyActiveLaunchPlanModelOutput.LaunchPlans != nil && - len(formerlyActiveLaunchPlanModelOutput.LaunchPlans) > 0 { + request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName()) + } else if len(formerlyActiveLaunchPlanModelOutput.LaunchPlans) > 0 { formerlyActiveLaunchPlanModel = &formerlyActiveLaunchPlanModelOutput.LaunchPlans[0] err = m.updateLaunchPlanModelState(formerlyActiveLaunchPlanModel, admin.LaunchPlanState_INACTIVE) if err != nil { @@ -322,7 +321,7 @@ func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin err = m.db.LaunchPlanRepo().SetActive(ctx, newlyActiveLaunchPlanModel, formerlyActiveLaunchPlanModel) if err != nil { logger.Debugf(ctx, - "Failed to set launchPlanModel with ID [%+v] to active with err %v", request.Id, err) + "Failed to set launchPlanModel with ID [%+v] to active with err %v", request.GetId(), err) return nil, err } return &admin.LaunchPlanUpdateResponse{}, nil @@ -331,11 +330,11 @@ func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin func (m *LaunchPlanManager) UpdateLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) ( *admin.LaunchPlanUpdateResponse, error) { - if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil { - logger.Debugf(ctx, "can't update launch plan [%+v] state, invalid identifier: %v", request.Id, err) + if err := validation.ValidateIdentifier(request.GetId(), common.LaunchPlan); err != nil { + logger.Debugf(ctx, "can't update launch plan [%+v] state, invalid identifier: %v", request.GetId(), err) } - ctx = getLaunchPlanContext(ctx, request.Id) - switch request.State { + ctx = getLaunchPlanContext(ctx, request.GetId()) + switch request.GetState() { case admin.LaunchPlanState_INACTIVE: return m.disableLaunchPlan(ctx, request) case admin.LaunchPlanState_ACTIVE: @@ -343,29 +342,29 @@ func (m *LaunchPlanManager) UpdateLaunchPlan(ctx context.Context, request *admin default: return nil, errors.NewFlyteAdminErrorf( codes.InvalidArgument, "Unrecognized launch plan state %v for update for launch plan [%+v]", - request.State, request.Id) + request.GetState(), request.GetId()) } } func (m *LaunchPlanManager) GetLaunchPlan(ctx context.Context, request *admin.ObjectGetRequest) ( *admin.LaunchPlan, error) { - if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil { - logger.Debugf(ctx, "can't get launch plan [%+v] with invalid identifier: %v", request.Id, err) + if err := validation.ValidateIdentifier(request.GetId(), common.LaunchPlan); err != nil { + logger.Debugf(ctx, "can't get launch plan [%+v] with invalid identifier: %v", request.GetId(), err) return nil, err } - ctx = getLaunchPlanContext(ctx, request.Id) - return util.GetLaunchPlan(ctx, m.db, request.Id) + ctx = getLaunchPlanContext(ctx, request.GetId()) + return util.GetLaunchPlan(ctx, m.db, request.GetId()) } func (m *LaunchPlanManager) GetActiveLaunchPlan(ctx context.Context, request *admin.ActiveLaunchPlanRequest) ( 
*admin.LaunchPlan, error) { if err := validation.ValidateActiveLaunchPlanRequest(request); err != nil { - logger.Debugf(ctx, "can't get active launch plan [%+v] with invalid request: %v", request.Id, err) + logger.Debugf(ctx, "can't get active launch plan [%+v] with invalid request: %v", request.GetId(), err) return nil, err } - ctx = m.getNamedEntityContext(ctx, request.Id) + ctx = m.getNamedEntityContext(ctx, request.GetId()) - filters, err := util.GetActiveLaunchPlanVersionFilters(request.Id.Project, request.Id.Domain, request.Id.Name) + filters, err := util.GetActiveLaunchPlanVersionFilters(request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName()) if err != nil { return nil, err } @@ -383,7 +382,7 @@ func (m *LaunchPlanManager) GetActiveLaunchPlan(ctx context.Context, request *ad } if len(output.LaunchPlans) != 1 { - return nil, errors.NewFlyteAdminErrorf(codes.NotFound, "No active launch plan could be found: %s:%s:%s", request.Id.Project, request.Id.Domain, request.Id.Name) + return nil, errors.NewFlyteAdminErrorf(codes.NotFound, "No active launch plan could be found: %s:%s:%s", request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName()) } return transformers.FromLaunchPlanModel(output.LaunchPlans[0]) @@ -397,30 +396,30 @@ func (m *LaunchPlanManager) ListLaunchPlans(ctx context.Context, request *admin. logger.Debugf(ctx, "") return nil, err } - ctx = m.getNamedEntityContext(ctx, request.Id) + ctx = m.getNamedEntityContext(ctx, request.GetId()) filters, err := util.GetDbFilters(util.FilterSpec{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, - RequestFilters: request.Filters, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), + RequestFilters: request.GetFilters(), }, common.LaunchPlan) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.LaunchPlanColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.LaunchPlanColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListLaunchPlans", request.Token) + "invalid pagination token %s for ListLaunchPlans", request.GetToken()) } listLaunchPlansInput := repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, @@ -438,7 +437,7 @@ func (m *LaunchPlanManager) ListLaunchPlans(ctx context.Context, request *admin. 
return nil, err } var token string - if len(output.LaunchPlans) == int(request.Limit) { + if len(output.LaunchPlans) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(output.LaunchPlans)) } return &admin.LaunchPlanList{ @@ -455,25 +454,25 @@ func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request * logger.Debugf(ctx, "") return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain()) - filters, err := util.ListActiveLaunchPlanVersionsFilters(request.Project, request.Domain) + filters, err := util.ListActiveLaunchPlanVersionsFilters(request.GetProject(), request.GetDomain()) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.LaunchPlanColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.LaunchPlanColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListActiveLaunchPlans", request.Token) + "invalid pagination token %s for ListActiveLaunchPlans", request.GetToken()) } listLaunchPlansInput := repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, @@ -491,7 +490,7 @@ func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request * return nil, err } var token string - if len(output.LaunchPlans) == int(request.Limit) { + if len(output.LaunchPlans) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(output.LaunchPlans)) } return &admin.LaunchPlanList{ @@ -503,26 +502,26 @@ func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request * // At least project name and domain must be specified along with limit. 
func (m *LaunchPlanManager) ListLaunchPlanIds(ctx context.Context, request *admin.NamedEntityIdentifierListRequest) ( *admin.NamedEntityIdentifierList, error) { - ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain()) filters, err := util.GetDbFilters(util.FilterSpec{ - Project: request.Project, - Domain: request.Domain, + Project: request.GetProject(), + Domain: request.GetDomain(), }, common.LaunchPlan) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.LaunchPlanColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.LaunchPlanColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { - return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s", request.Token) + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s", request.GetToken()) } listLaunchPlansInput := repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, @@ -535,7 +534,7 @@ func (m *LaunchPlanManager) ListLaunchPlanIds(ctx context.Context, request *admi return nil, err } var token string - if len(output.LaunchPlans) == int(request.Limit) { + if len(output.LaunchPlans) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(output.LaunchPlans)) } return &admin.NamedEntityIdentifierList{ diff --git a/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go b/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go index d40d7c5e1f..3d551c4bc6 100644 --- a/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go +++ b/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go @@ -59,7 +59,7 @@ func getMockConfigForLpTest() runtimeInterfaces.Configuration { func setDefaultWorkflowCallbackForLpTest(repository interfaces.Repository) { workflowSpec := testutils.GetSampleWorkflowSpecForTest() - typedInterface, _ := proto.Marshal(workflowSpec.Template.Interface) + typedInterface, _ := proto.Marshal(workflowSpec.GetTemplate().GetInterface()) workflowGetFunc := func(input interfaces.Identifier) (models.Workflow, error) { return models.Workflow{ WorkflowKey: models.WorkflowKey{ @@ -107,10 +107,10 @@ func TestLaunchPlanManager_GetLaunchPlan(t *testing.T) { workflowRequest := testutils.GetWorkflowRequest() closure := admin.LaunchPlanClosure{ - ExpectedInputs: lpRequest.Spec.DefaultInputs, - ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs, + ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(), + ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(), } - specBytes, _ := proto.Marshal(lpRequest.Spec) + specBytes, _ := proto.Marshal(lpRequest.GetSpec()) closureBytes, _ := proto.Marshal(&closure) launchPlanGetFunc := func(input interfaces.Identifier) (models.LaunchPlan, error) { @@ -143,10 +143,10 @@ func TestLaunchPlanManager_GetActiveLaunchPlan(t *testing.T) { workflowRequest := testutils.GetWorkflowRequest() closure := admin.LaunchPlanClosure{ - ExpectedInputs: lpRequest.Spec.DefaultInputs, - ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs, + ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(), + ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(), } - 
specBytes, _ := proto.Marshal(lpRequest.Spec) + specBytes, _ := proto.Marshal(lpRequest.GetSpec()) closureBytes, _ := proto.Marshal(&closure) launchPlanListFunc := func(input interfaces.ListResourceInput) (interfaces.LaunchPlanCollectionOutput, error) { @@ -169,10 +169,10 @@ func TestLaunchPlanManager_GetActiveLaunchPlan(t *testing.T) { LaunchPlans: []models.LaunchPlan{ { LaunchPlanKey: models.LaunchPlanKey{ - Project: lpRequest.Id.Project, - Domain: lpRequest.Id.Domain, - Name: lpRequest.Id.Name, - Version: lpRequest.Id.Version, + Project: lpRequest.GetId().GetProject(), + Domain: lpRequest.GetId().GetDomain(), + Name: lpRequest.GetId().GetName(), + Version: lpRequest.GetId().GetVersion(), }, Spec: specBytes, Closure: closureBytes, @@ -185,9 +185,9 @@ func TestLaunchPlanManager_GetActiveLaunchPlan(t *testing.T) { repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetListCallback(launchPlanListFunc) response, err := lpManager.GetActiveLaunchPlan(context.Background(), &admin.ActiveLaunchPlanRequest{ Id: &admin.NamedEntityIdentifier{ - Project: lpRequest.Id.Project, - Domain: lpRequest.Id.Domain, - Name: lpRequest.Id.Name, + Project: lpRequest.GetId().GetProject(), + Domain: lpRequest.GetId().GetDomain(), + Name: lpRequest.GetId().GetName(), }, }) assert.NoError(t, err) @@ -205,9 +205,9 @@ func TestLaunchPlanManager_GetActiveLaunchPlan_NoneActive(t *testing.T) { repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetListCallback(launchPlanListFunc) response, err := lpManager.GetActiveLaunchPlan(context.Background(), &admin.ActiveLaunchPlanRequest{ Id: &admin.NamedEntityIdentifier{ - Project: lpRequest.Id.Project, - Domain: lpRequest.Id.Domain, - Name: lpRequest.Id.Name, + Project: lpRequest.GetId().GetProject(), + Domain: lpRequest.GetId().GetDomain(), + Name: lpRequest.GetId().GetName(), }, }) assert.EqualError(t, err, "No active launch plan could be found: project:domain:name") @@ -298,11 +298,11 @@ func TestCreateLaunchPlanValidateCreate(t *testing.T) { setDefaultWorkflowCallbackForLpTest(repository) lpCreateFunc := func(input models.LaunchPlan) error { launchPlan, _ := transformers.FromLaunchPlanModel(input) - assert.Equal(t, project, launchPlan.Id.Project) - assert.Equal(t, domain, launchPlan.Id.Domain) - assert.Equal(t, name, launchPlan.Id.Name) - assert.Equal(t, version, launchPlan.Id.Version) - assert.True(t, proto.Equal(testutils.GetLaunchPlanRequest().Spec, launchPlan.Spec)) + assert.Equal(t, project, launchPlan.GetId().GetProject()) + assert.Equal(t, domain, launchPlan.GetId().GetDomain()) + assert.Equal(t, name, launchPlan.GetId().GetName()) + assert.Equal(t, version, launchPlan.GetId().GetVersion()) + assert.True(t, proto.Equal(testutils.GetLaunchPlanRequest().GetSpec(), launchPlan.GetSpec())) expectedInputs := &core.ParameterMap{ Parameters: map[string]*core.Parameter{ "foo": { @@ -315,9 +315,9 @@ func TestCreateLaunchPlanValidateCreate(t *testing.T) { }, }, } - assert.True(t, proto.Equal(expectedInputs, launchPlan.Closure.ExpectedInputs)) - assert.True(t, proto.Equal(testutils.GetSampleWorkflowSpecForTest().Template.Interface.Outputs, - launchPlan.Closure.ExpectedOutputs)) + assert.True(t, proto.Equal(expectedInputs, launchPlan.GetClosure().GetExpectedInputs())) + assert.True(t, proto.Equal(testutils.GetSampleWorkflowSpecForTest().GetTemplate().GetInterface().GetOutputs(), + launchPlan.GetClosure().GetExpectedOutputs())) return nil } repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetCreateCallback(lpCreateFunc) @@ -350,15 +350,15 
@@ func TestCreateLaunchPlanNoWorkflowInterface(t *testing.T) { repository.WorkflowRepo().(*repositoryMocks.MockWorkflowRepo).SetGetCallback(workflowGetFunc) lpCreateFunc := func(input models.LaunchPlan) error { launchPlan, _ := transformers.FromLaunchPlanModel(input) - assert.Equal(t, project, launchPlan.Id.Project) - assert.Equal(t, domain, launchPlan.Id.Domain) - assert.Equal(t, name, launchPlan.Id.Name) - assert.Equal(t, version, launchPlan.Id.Version) - expectedLaunchPlanSpec := testutils.GetLaunchPlanRequest().Spec + assert.Equal(t, project, launchPlan.GetId().GetProject()) + assert.Equal(t, domain, launchPlan.GetId().GetDomain()) + assert.Equal(t, name, launchPlan.GetId().GetName()) + assert.Equal(t, version, launchPlan.GetId().GetVersion()) + expectedLaunchPlanSpec := testutils.GetLaunchPlanRequest().GetSpec() expectedLaunchPlanSpec.FixedInputs = nil expectedLaunchPlanSpec.DefaultInputs.Parameters = map[string]*core.Parameter{} - assert.EqualValues(t, expectedLaunchPlanSpec.String(), launchPlan.Spec.String()) - assert.Empty(t, launchPlan.Closure.ExpectedInputs) + assert.EqualValues(t, expectedLaunchPlanSpec.String(), launchPlan.GetSpec().String()) + assert.Empty(t, launchPlan.GetClosure().GetExpectedInputs()) return nil } repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetCreateCallback(lpCreateFunc) @@ -1058,10 +1058,10 @@ func TestLaunchPlanManager_ListLaunchPlans(t *testing.T) { workflowRequest := testutils.GetWorkflowRequest() closure := admin.LaunchPlanClosure{ - ExpectedInputs: lpRequest.Spec.DefaultInputs, - ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs, + ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(), + ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(), } - specBytes, _ := proto.Marshal(lpRequest.Spec) + specBytes, _ := proto.Marshal(lpRequest.GetSpec()) closureBytes, _ := proto.Marshal(&closure) createdAt := time.Now() @@ -1146,14 +1146,14 @@ func TestLaunchPlanManager_ListLaunchPlans(t *testing.T) { }, }) assert.NoError(t, err) - assert.Equal(t, 2, len(lpList.LaunchPlans)) - for idx, lp := range lpList.LaunchPlans { - assert.Equal(t, project, lp.Id.Project) - assert.Equal(t, domain, lp.Id.Domain) - assert.Equal(t, name, lp.Id.Name) - assert.Equal(t, fmt.Sprintf("%v", idx+1), lp.Id.Version) - assert.True(t, proto.Equal(createdAtProto, lp.Closure.CreatedAt)) - assert.True(t, proto.Equal(updatedAtProto, lp.Closure.UpdatedAt)) + assert.Equal(t, 2, len(lpList.GetLaunchPlans())) + for idx, lp := range lpList.GetLaunchPlans() { + assert.Equal(t, project, lp.GetId().GetProject()) + assert.Equal(t, domain, lp.GetId().GetDomain()) + assert.Equal(t, name, lp.GetId().GetName()) + assert.Equal(t, fmt.Sprintf("%v", idx+1), lp.GetId().GetVersion()) + assert.True(t, proto.Equal(createdAtProto, lp.GetClosure().GetCreatedAt())) + assert.True(t, proto.Equal(updatedAtProto, lp.GetClosure().GetUpdatedAt())) } } @@ -1165,10 +1165,10 @@ func TestLaunchPlanManager_ListLaunchPlanIds(t *testing.T) { workflowRequest := testutils.GetWorkflowRequest() closure := admin.LaunchPlanClosure{ - ExpectedInputs: lpRequest.Spec.DefaultInputs, - ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs, + ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(), + ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(), } - specBytes, _ := proto.Marshal(lpRequest.Spec) + specBytes, _ := proto.Marshal(lpRequest.GetSpec()) closureBytes, _ := proto.Marshal(&closure) launchPlanListFunc := func(input 
interfaces.ListResourceInput) ( @@ -1232,11 +1232,11 @@ func TestLaunchPlanManager_ListLaunchPlanIds(t *testing.T) { }, }) assert.NoError(t, err) - assert.Equal(t, 2, len(lpList.Entities)) - for _, id := range lpList.Entities { - assert.Equal(t, project, id.Project) - assert.Equal(t, domain, id.Domain) - assert.Equal(t, name, id.Name) + assert.Equal(t, 2, len(lpList.GetEntities())) + for _, id := range lpList.GetEntities() { + assert.Equal(t, project, id.GetProject()) + assert.Equal(t, domain, id.GetDomain()) + assert.Equal(t, name, id.GetName()) } } @@ -1248,10 +1248,10 @@ func TestLaunchPlanManager_ListActiveLaunchPlans(t *testing.T) { workflowRequest := testutils.GetWorkflowRequest() closure := admin.LaunchPlanClosure{ - ExpectedInputs: lpRequest.Spec.DefaultInputs, - ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs, + ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(), + ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(), } - specBytes, _ := proto.Marshal(lpRequest.Spec) + specBytes, _ := proto.Marshal(lpRequest.GetSpec()) closureBytes, _ := proto.Marshal(&closure) launchPlanListFunc := func(input interfaces.ListResourceInput) ( @@ -1319,11 +1319,11 @@ func TestLaunchPlanManager_ListActiveLaunchPlans(t *testing.T) { }, }) assert.NoError(t, err) - assert.Equal(t, 2, len(lpList.LaunchPlans)) - for _, id := range lpList.LaunchPlans { - assert.Equal(t, project, id.Id.Project) - assert.Equal(t, domain, id.Id.Domain) - assert.Equal(t, name, id.Id.Name) + assert.Equal(t, 2, len(lpList.GetLaunchPlans())) + for _, id := range lpList.GetLaunchPlans() { + assert.Equal(t, project, id.GetId().GetProject()) + assert.Equal(t, domain, id.GetId().GetDomain()) + assert.Equal(t, name, id.GetId().GetName()) } } diff --git a/flyteadmin/pkg/manager/impl/metrics_manager.go b/flyteadmin/pkg/manager/impl/metrics_manager.go index 231909f4e8..a689c60a70 100644 --- a/flyteadmin/pkg/manager/impl/metrics_manager.go +++ b/flyteadmin/pkg/manager/impl/metrics_manager.go @@ -70,18 +70,18 @@ func createOperationSpan(startTime, endTime *timestamp.Timestamp, operation stri // getBranchNode searches the provided BranchNode definition for the Node identified by nodeID. 
func getBranchNode(nodeID string, branchNode *core.BranchNode) *core.Node { - if branchNode.IfElse.Case.ThenNode.Id == nodeID { - return branchNode.IfElse.Case.ThenNode + if branchNode.GetIfElse().GetCase().GetThenNode().GetId() == nodeID { + return branchNode.GetIfElse().GetCase().GetThenNode() } - for _, other := range branchNode.IfElse.Other { - if other.ThenNode.Id == nodeID { - return other.ThenNode + for _, other := range branchNode.GetIfElse().GetOther() { + if other.GetThenNode().GetId() == nodeID { + return other.GetThenNode() } } - if elseNode, ok := branchNode.IfElse.Default.(*core.IfElseBlock_ElseNode); ok { - if elseNode.ElseNode.Id == nodeID { + if elseNode, ok := branchNode.GetIfElse().GetDefault().(*core.IfElseBlock_ElseNode); ok { + if elseNode.ElseNode.GetId() == nodeID { return elseNode.ElseNode } } @@ -98,13 +98,13 @@ func (m *MetricsManager) getLatestUpstreamNodeExecution(nodeID string, upstreamN var nodeExecution *admin.NodeExecution var latestUpstreamUpdatedAt = time.Unix(0, 0) if connectionSet, exists := upstreamNodeIds[nodeID]; exists { - for _, upstreamNodeID := range connectionSet.Ids { + for _, upstreamNodeID := range connectionSet.GetIds() { upstreamNodeExecution, exists := nodeExecutions[upstreamNodeID] if !exists { continue } - t := upstreamNodeExecution.Closure.UpdatedAt.AsTime() + t := upstreamNodeExecution.GetClosure().GetUpdatedAt().AsTime() if t.After(latestUpstreamUpdatedAt) { nodeExecution = upstreamNodeExecution latestUpstreamUpdatedAt = t @@ -124,15 +124,15 @@ func (m *MetricsManager) getNodeExecutions(ctx context.Context, request *admin.N return nil, err } - for _, nodeExecution := range response.NodeExecutions { - nodeExecutions[nodeExecution.Metadata.SpecNodeId] = nodeExecution + for _, nodeExecution := range response.GetNodeExecutions() { + nodeExecutions[nodeExecution.GetMetadata().GetSpecNodeId()] = nodeExecution } - if len(response.NodeExecutions) < int(request.Limit) { + if len(response.GetNodeExecutions()) < int(request.GetLimit()) { break } - request.Token = response.Token + request.Token = response.GetToken() } return nodeExecutions, nil @@ -147,13 +147,13 @@ func (m *MetricsManager) getTaskExecutions(ctx context.Context, request *admin.T return nil, err } - taskExecutions = append(taskExecutions, response.TaskExecutions...) + taskExecutions = append(taskExecutions, response.GetTaskExecutions()...) 
- if len(response.TaskExecutions) < int(request.Limit) { + if len(response.GetTaskExecutions()) < int(request.GetLimit()) { break } - request.Token = response.Token + request.Token = response.GetToken() } return taskExecutions, nil @@ -166,9 +166,9 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context, // retrieve node execution(s) nodeExecutions, err := m.getNodeExecutions(ctx, &admin.NodeExecutionListRequest{ - WorkflowExecutionId: nodeExecution.Id.ExecutionId, + WorkflowExecutionId: nodeExecution.GetId().GetExecutionId(), Limit: RequestLimit, - UniqueParentId: nodeExecution.Id.NodeId, + UniqueParentId: nodeExecution.GetId().GetNodeId(), }) if err != nil { return err @@ -176,7 +176,7 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context, // check if the node started if len(nodeExecutions) == 0 { - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup)) } else { // parse branchNode if len(nodeExecutions) != 1 { @@ -188,14 +188,14 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context, branchNodeExecution = e } - node := getBranchNode(branchNodeExecution.Metadata.SpecNodeId, branchNode) + node := getBranchNode(branchNodeExecution.GetMetadata().GetSpecNodeId(), branchNode) if node == nil { return fmt.Errorf("failed to identify branch node final node definition for nodeID '%s' and branchNode '%+v'", - branchNodeExecution.Metadata.SpecNodeId, branchNode) + branchNodeExecution.GetMetadata().GetSpecNodeId(), branchNode) } // frontend overhead - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, branchNodeExecution.Closure.CreatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), branchNodeExecution.GetClosure().GetCreatedAt(), nodeSetup)) // node execution nodeExecutionSpan, err := m.parseNodeExecution(ctx, branchNodeExecution, node, depth) @@ -206,9 +206,9 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context, *spans = append(*spans, nodeExecutionSpan) // backend overhead - if !nodeExecution.Closure.UpdatedAt.AsTime().Before(branchNodeExecution.Closure.UpdatedAt.AsTime()) { - *spans = append(*spans, createOperationSpan(branchNodeExecution.Closure.UpdatedAt, - nodeExecution.Closure.UpdatedAt, nodeTeardown)) + if !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(branchNodeExecution.GetClosure().GetUpdatedAt().AsTime()) { + *spans = append(*spans, createOperationSpan(branchNodeExecution.GetClosure().GetUpdatedAt(), + nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown)) } } @@ -219,7 +219,7 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context, // which are appended to the provided spans argument. 
func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error { taskExecutions, err := m.getTaskExecutions(ctx, &admin.TaskExecutionListRequest{ - NodeExecutionId: nodeExecution.Id, + NodeExecutionId: nodeExecution.GetId(), Limit: RequestLimit, }) if err != nil { @@ -228,18 +228,18 @@ func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExec // if no task executions then everything is execution overhead if len(taskExecutions) == 0 { - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup)) } else { // frontend overhead - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, taskExecutions[0].Closure.CreatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), taskExecutions[0].GetClosure().GetCreatedAt(), nodeSetup)) // task execution(s) parseTaskExecutions(taskExecutions, spans, depth) nodeExecutions, err := m.getNodeExecutions(ctx, &admin.NodeExecutionListRequest{ - WorkflowExecutionId: nodeExecution.Id.ExecutionId, + WorkflowExecutionId: nodeExecution.GetId().GetExecutionId(), Limit: RequestLimit, - UniqueParentId: nodeExecution.Id.NodeId, + UniqueParentId: nodeExecution.GetId().GetNodeId(), }) if err != nil { return err @@ -247,31 +247,31 @@ func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExec lastTask := taskExecutions[len(taskExecutions)-1] if len(nodeExecutions) == 0 { - if !nodeExecution.Closure.UpdatedAt.AsTime().Before(lastTask.Closure.UpdatedAt.AsTime()) { - *spans = append(*spans, createOperationSpan(lastTask.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeReset)) + if !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(lastTask.GetClosure().GetUpdatedAt().AsTime()) { + *spans = append(*spans, createOperationSpan(lastTask.GetClosure().GetUpdatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeReset)) } } else { // between task execution(s) and node execution(s) overhead startNode := nodeExecutions[v1alpha1.StartNodeID] - *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].Closure.UpdatedAt, - startNode.Closure.UpdatedAt, nodeReset)) + *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].GetClosure().GetUpdatedAt(), + startNode.GetClosure().GetUpdatedAt(), nodeReset)) // node execution(s) - getDataRequest := &admin.NodeExecutionGetDataRequest{Id: nodeExecution.Id} + getDataRequest := &admin.NodeExecutionGetDataRequest{Id: nodeExecution.GetId()} nodeExecutionData, err := m.nodeExecutionManager.GetNodeExecutionData(ctx, getDataRequest) if err != nil { return err } - if err := m.parseNodeExecutions(ctx, nodeExecutions, nodeExecutionData.DynamicWorkflow.CompiledWorkflow, spans, depth); err != nil { + if err := m.parseNodeExecutions(ctx, nodeExecutions, nodeExecutionData.GetDynamicWorkflow().GetCompiledWorkflow(), spans, depth); err != nil { return err } // backend overhead latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID, - nodeExecutionData.DynamicWorkflow.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions) - if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) { - *spans = 
append(*spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown)) + nodeExecutionData.GetDynamicWorkflow().GetCompiledWorkflow().GetPrimary().GetConnections().GetUpstream(), nodeExecutions) + if latestUpstreamNode != nil && !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(latestUpstreamNode.GetClosure().GetUpdatedAt().AsTime()) { + *spans = append(*spans, createOperationSpan(latestUpstreamNode.GetClosure().GetUpdatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown)) } } } @@ -285,14 +285,14 @@ func (m *MetricsManager) parseExecution(ctx context.Context, execution *admin.Ex spans := make([]*core.Span, 0) if depth != 0 { // retrieve workflow and node executions - workflowRequest := &admin.ObjectGetRequest{Id: execution.Closure.WorkflowId} + workflowRequest := &admin.ObjectGetRequest{Id: execution.GetClosure().GetWorkflowId()} workflow, err := m.workflowManager.GetWorkflow(ctx, workflowRequest) if err != nil { return nil, err } nodeExecutions, err := m.getNodeExecutions(ctx, &admin.NodeExecutionListRequest{ - WorkflowExecutionId: execution.Id, + WorkflowExecutionId: execution.GetId(), Limit: RequestLimit, }) if err != nil { @@ -301,32 +301,32 @@ func (m *MetricsManager) parseExecution(ctx context.Context, execution *admin.Ex // check if workflow has started startNode := nodeExecutions[v1alpha1.StartNodeID] - if startNode.Closure.UpdatedAt == nil || reflect.DeepEqual(startNode.Closure.UpdatedAt, emptyTimestamp) { - spans = append(spans, createOperationSpan(execution.Closure.CreatedAt, execution.Closure.UpdatedAt, workflowSetup)) + if startNode.GetClosure().GetUpdatedAt() == nil || reflect.DeepEqual(startNode.GetClosure().GetUpdatedAt(), emptyTimestamp) { + spans = append(spans, createOperationSpan(execution.GetClosure().GetCreatedAt(), execution.GetClosure().GetUpdatedAt(), workflowSetup)) } else { // compute frontend overhead - spans = append(spans, createOperationSpan(execution.Closure.CreatedAt, startNode.Closure.UpdatedAt, workflowSetup)) + spans = append(spans, createOperationSpan(execution.GetClosure().GetCreatedAt(), startNode.GetClosure().GetUpdatedAt(), workflowSetup)) // iterate over nodes and compute overhead - if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.Closure.CompiledWorkflow, &spans, depth-1); err != nil { + if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.GetClosure().GetCompiledWorkflow(), &spans, depth-1); err != nil { return nil, err } // compute backend overhead latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID, - workflow.Closure.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions) - if latestUpstreamNode != nil && !execution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) { - spans = append(spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, - execution.Closure.UpdatedAt, workflowTeardown)) + workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetConnections().GetUpstream(), nodeExecutions) + if latestUpstreamNode != nil && !execution.GetClosure().GetUpdatedAt().AsTime().Before(latestUpstreamNode.GetClosure().GetUpdatedAt().AsTime()) { + spans = append(spans, createOperationSpan(latestUpstreamNode.GetClosure().GetUpdatedAt(), + execution.GetClosure().GetUpdatedAt(), workflowTeardown)) } } } return &core.Span{ - StartTime: execution.Closure.CreatedAt, - EndTime: execution.Closure.UpdatedAt, + StartTime: execution.GetClosure().GetCreatedAt(), + EndTime: 
execution.GetClosure().GetUpdatedAt(), Id: &core.Span_WorkflowId{ - WorkflowId: execution.Id, + WorkflowId: execution.GetId(), }, Spans: spans, }, nil @@ -336,23 +336,23 @@ func (m *MetricsManager) parseExecution(ctx context.Context, execution *admin.Ex // which are appended to the provided spans argument. func (m *MetricsManager) parseGateNodeExecution(_ context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span) { // check if node has started yet - if nodeExecution.Closure.StartedAt == nil || reflect.DeepEqual(nodeExecution.Closure.StartedAt, emptyTimestamp) { - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + if nodeExecution.GetClosure().GetStartedAt() == nil || reflect.DeepEqual(nodeExecution.GetClosure().GetStartedAt(), emptyTimestamp) { + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup)) } else { // frontend overhead - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.StartedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetStartedAt(), nodeSetup)) // check if plugin has completed yet - if nodeExecution.Closure.Duration == nil || reflect.DeepEqual(nodeExecution.Closure.Duration, emptyDuration) { - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.StartedAt, - nodeExecution.Closure.UpdatedAt, nodeIdle)) + if nodeExecution.GetClosure().GetDuration() == nil || reflect.DeepEqual(nodeExecution.GetClosure().GetDuration(), emptyDuration) { + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetStartedAt(), + nodeExecution.GetClosure().GetUpdatedAt(), nodeIdle)) } else { // idle time - nodeEndTime := timestamppb.New(nodeExecution.Closure.StartedAt.AsTime().Add(nodeExecution.Closure.Duration.AsDuration())) - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.StartedAt, nodeEndTime, nodeIdle)) + nodeEndTime := timestamppb.New(nodeExecution.GetClosure().GetStartedAt().AsTime().Add(nodeExecution.GetClosure().GetDuration().AsDuration())) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetStartedAt(), nodeEndTime, nodeIdle)) // backend overhead - *spans = append(*spans, createOperationSpan(nodeEndTime, nodeExecution.Closure.UpdatedAt, nodeTeardown)) + *spans = append(*spans, createOperationSpan(nodeEndTime, nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown)) } } } @@ -361,19 +361,19 @@ func (m *MetricsManager) parseGateNodeExecution(_ context.Context, nodeExecution // Spans which are appended to the provided spans argument. 
func (m *MetricsManager) parseLaunchPlanNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error { // check if workflow started yet - workflowNode := nodeExecution.Closure.GetWorkflowNodeMetadata() + workflowNode := nodeExecution.GetClosure().GetWorkflowNodeMetadata() if workflowNode == nil { - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup)) } else { // retrieve execution - executionRequest := &admin.WorkflowExecutionGetRequest{Id: workflowNode.ExecutionId} + executionRequest := &admin.WorkflowExecutionGetRequest{Id: workflowNode.GetExecutionId()} execution, err := m.executionManager.GetExecution(ctx, executionRequest) if err != nil { return err } // frontend overhead - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, execution.Closure.CreatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), execution.GetClosure().GetCreatedAt(), nodeSetup)) // execution span, err := m.parseExecution(ctx, execution, depth) @@ -384,8 +384,8 @@ func (m *MetricsManager) parseLaunchPlanNodeExecution(ctx context.Context, nodeE *spans = append(*spans, span) // backend overhead - if !nodeExecution.Closure.UpdatedAt.AsTime().Before(execution.Closure.UpdatedAt.AsTime()) { - *spans = append(*spans, createOperationSpan(execution.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown)) + if !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(execution.GetClosure().GetUpdatedAt().AsTime()) { + *spans = append(*spans, createOperationSpan(execution.GetClosure().GetUpdatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown)) } } @@ -400,7 +400,7 @@ func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution * // parse node var err error - switch target := node.Target.(type) { + switch target := node.GetTarget().(type) { case *core.Node_BranchNode: // handle branch node err = m.parseBranchNodeExecution(ctx, nodeExecution, target.BranchNode, &spans, depth-1) @@ -408,7 +408,7 @@ func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution * // handle gate node m.parseGateNodeExecution(ctx, nodeExecution, &spans) case *core.Node_TaskNode: - if nodeExecution.Metadata.IsParentNode { + if nodeExecution.GetMetadata().GetIsParentNode() { // handle dynamic node err = m.parseDynamicNodeExecution(ctx, nodeExecution, &spans, depth-1) } else { @@ -416,7 +416,7 @@ func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution * err = m.parseTaskNodeExecution(ctx, nodeExecution, &spans, depth-1) } case *core.Node_WorkflowNode: - switch workflow := target.WorkflowNode.Reference.(type) { + switch workflow := target.WorkflowNode.GetReference().(type) { case *core.WorkflowNode_LaunchplanRef: // handle launch plan err = m.parseLaunchPlanNodeExecution(ctx, nodeExecution, &spans, depth-1) @@ -436,10 +436,10 @@ func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution * } return &core.Span{ - StartTime: nodeExecution.Closure.CreatedAt, - EndTime: nodeExecution.Closure.UpdatedAt, + StartTime: nodeExecution.GetClosure().GetCreatedAt(), + EndTime: nodeExecution.GetClosure().GetUpdatedAt(), Id: &core.Span_NodeId{ - NodeId: nodeExecution.Id, + NodeId: nodeExecution.GetId(), }, Spans: spans, }, nil @@ 
-456,29 +456,29 @@ func (m *MetricsManager) parseNodeExecutions(ctx context.Context, nodeExecutions sortedNodeExecutions = append(sortedNodeExecutions, nodeExecution) } sort.Slice(sortedNodeExecutions, func(i, j int) bool { - x := sortedNodeExecutions[i].Closure.CreatedAt.AsTime() - y := sortedNodeExecutions[j].Closure.CreatedAt.AsTime() + x := sortedNodeExecutions[i].GetClosure().GetCreatedAt().AsTime() + y := sortedNodeExecutions[j].GetClosure().GetCreatedAt().AsTime() return x.Before(y) }) // iterate over sorted node executions for _, nodeExecution := range sortedNodeExecutions { - specNodeID := nodeExecution.Metadata.SpecNodeId + specNodeID := nodeExecution.GetMetadata().GetSpecNodeId() if specNodeID == v1alpha1.StartNodeID || specNodeID == v1alpha1.EndNodeID { continue } // get node definition from workflow var node *core.Node - for _, n := range compiledWorkflowClosure.Primary.Template.Nodes { - if n.Id == specNodeID { + for _, n := range compiledWorkflowClosure.GetPrimary().GetTemplate().GetNodes() { + if n.GetId() == specNodeID { node = n } } if node == nil { return fmt.Errorf("failed to discover workflow node '%s' in workflow '%+v'", - specNodeID, compiledWorkflowClosure.Primary.Template.Id) + specNodeID, compiledWorkflowClosure.GetPrimary().GetTemplate().GetId()) } // parse node execution @@ -489,10 +489,10 @@ func (m *MetricsManager) parseNodeExecutions(ctx context.Context, nodeExecutions // prepend nodeExecution spans with node transition time latestUpstreamNode := m.getLatestUpstreamNodeExecution(specNodeID, - compiledWorkflowClosure.Primary.Connections.Upstream, nodeExecutions) + compiledWorkflowClosure.GetPrimary().GetConnections().GetUpstream(), nodeExecutions) if latestUpstreamNode != nil { - nodeExecutionSpan.Spans = append([]*core.Span{createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, - nodeExecution.Closure.CreatedAt, nodeTransition)}, nodeExecutionSpan.Spans...) + nodeExecutionSpan.Spans = append([]*core.Span{createOperationSpan(latestUpstreamNode.GetClosure().GetUpdatedAt(), + nodeExecution.GetClosure().GetCreatedAt(), nodeTransition)}, nodeExecutionSpan.GetSpans()...) 
} *spans = append(*spans, nodeExecutionSpan) @@ -508,9 +508,9 @@ func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context, // retrieve node execution(s) nodeExecutions, err := m.getNodeExecutions(ctx, &admin.NodeExecutionListRequest{ - WorkflowExecutionId: nodeExecution.Id.ExecutionId, + WorkflowExecutionId: nodeExecution.GetId().GetExecutionId(), Limit: RequestLimit, - UniqueParentId: nodeExecution.Id.NodeId, + UniqueParentId: nodeExecution.GetId().GetNodeId(), }) if err != nil { return err @@ -518,11 +518,11 @@ func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context, // check if the subworkflow started if len(nodeExecutions) == 0 { - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup)) } else { // frontend overhead startNode := nodeExecutions[v1alpha1.StartNodeID] - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, startNode.Closure.UpdatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), startNode.GetClosure().GetUpdatedAt(), nodeSetup)) // retrieve workflow workflowRequest := &admin.ObjectGetRequest{Id: identifier} @@ -532,15 +532,15 @@ func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context, } // node execution(s) - if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.Closure.CompiledWorkflow, spans, depth); err != nil { + if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.GetClosure().GetCompiledWorkflow(), spans, depth); err != nil { return err } // backend overhead latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID, - workflow.Closure.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions) - if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) { - *spans = append(*spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown)) + workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetConnections().GetUpstream(), nodeExecutions) + if latestUpstreamNode != nil && !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(latestUpstreamNode.GetClosure().GetUpdatedAt().AsTime()) { + *spans = append(*spans, createOperationSpan(latestUpstreamNode.GetClosure().GetUpdatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown)) } } @@ -553,32 +553,32 @@ func parseTaskExecution(taskExecution *admin.TaskExecution) *core.Span { spans := make([]*core.Span, 0) // check if plugin has started yet - if taskExecution.Closure.StartedAt == nil || reflect.DeepEqual(taskExecution.Closure.StartedAt, emptyTimestamp) { - spans = append(spans, createOperationSpan(taskExecution.Closure.CreatedAt, taskExecution.Closure.UpdatedAt, taskSetup)) + if taskExecution.GetClosure().GetStartedAt() == nil || reflect.DeepEqual(taskExecution.GetClosure().GetStartedAt(), emptyTimestamp) { + spans = append(spans, createOperationSpan(taskExecution.GetClosure().GetCreatedAt(), taskExecution.GetClosure().GetUpdatedAt(), taskSetup)) } else { // frontend overhead - spans = append(spans, createOperationSpan(taskExecution.Closure.CreatedAt, taskExecution.Closure.StartedAt, taskSetup)) + spans = append(spans, createOperationSpan(taskExecution.GetClosure().GetCreatedAt(), 
taskExecution.GetClosure().GetStartedAt(), taskSetup)) // check if plugin has completed yet - if taskExecution.Closure.Duration == nil || reflect.DeepEqual(taskExecution.Closure.Duration, emptyDuration) { - spans = append(spans, createOperationSpan(taskExecution.Closure.StartedAt, taskExecution.Closure.UpdatedAt, taskRuntime)) + if taskExecution.GetClosure().GetDuration() == nil || reflect.DeepEqual(taskExecution.GetClosure().GetDuration(), emptyDuration) { + spans = append(spans, createOperationSpan(taskExecution.GetClosure().GetStartedAt(), taskExecution.GetClosure().GetUpdatedAt(), taskRuntime)) } else { // plugin execution - taskEndTime := timestamppb.New(taskExecution.Closure.StartedAt.AsTime().Add(taskExecution.Closure.Duration.AsDuration())) - spans = append(spans, createOperationSpan(taskExecution.Closure.StartedAt, taskEndTime, taskRuntime)) + taskEndTime := timestamppb.New(taskExecution.GetClosure().GetStartedAt().AsTime().Add(taskExecution.GetClosure().GetDuration().AsDuration())) + spans = append(spans, createOperationSpan(taskExecution.GetClosure().GetStartedAt(), taskEndTime, taskRuntime)) // backend overhead - if !taskExecution.Closure.UpdatedAt.AsTime().Before(taskEndTime.AsTime()) { - spans = append(spans, createOperationSpan(taskEndTime, taskExecution.Closure.UpdatedAt, taskTeardown)) + if !taskExecution.GetClosure().GetUpdatedAt().AsTime().Before(taskEndTime.AsTime()) { + spans = append(spans, createOperationSpan(taskEndTime, taskExecution.GetClosure().GetUpdatedAt(), taskTeardown)) } } } return &core.Span{ - StartTime: taskExecution.Closure.CreatedAt, - EndTime: taskExecution.Closure.UpdatedAt, + StartTime: taskExecution.GetClosure().GetCreatedAt(), + EndTime: taskExecution.GetClosure().GetUpdatedAt(), Id: &core.Span_TaskId{ - TaskId: taskExecution.Id, + TaskId: taskExecution.GetId(), }, Spans: spans, } @@ -589,15 +589,15 @@ func parseTaskExecution(taskExecution *admin.TaskExecution) *core.Span { func parseTaskExecutions(taskExecutions []*admin.TaskExecution, spans *[]*core.Span, depth int) { // sort task executions sort.Slice(taskExecutions, func(i, j int) bool { - x := taskExecutions[i].Closure.CreatedAt.AsTime() - y := taskExecutions[j].Closure.CreatedAt.AsTime() + x := taskExecutions[i].GetClosure().GetCreatedAt().AsTime() + y := taskExecutions[j].GetClosure().GetCreatedAt().AsTime() return x.Before(y) }) // iterate over task executions for index, taskExecution := range taskExecutions { if index > 0 { - *spans = append(*spans, createOperationSpan(taskExecutions[index-1].Closure.UpdatedAt, taskExecution.Closure.CreatedAt, nodeReset)) + *spans = append(*spans, createOperationSpan(taskExecutions[index-1].GetClosure().GetUpdatedAt(), taskExecution.GetClosure().GetCreatedAt(), nodeReset)) } if depth != 0 { @@ -611,7 +611,7 @@ func parseTaskExecutions(taskExecutions []*admin.TaskExecution, spans *[]*core.S func (m *MetricsManager) parseTaskNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error { // retrieve task executions taskExecutions, err := m.getTaskExecutions(ctx, &admin.TaskExecutionListRequest{ - NodeExecutionId: nodeExecution.Id, + NodeExecutionId: nodeExecution.GetId(), Limit: RequestLimit, }) if err != nil { @@ -620,19 +620,19 @@ func (m *MetricsManager) parseTaskNodeExecution(ctx context.Context, nodeExecuti // if no task executions then everything is execution overhead if len(taskExecutions) == 0 { - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, 
nodeExecution.Closure.UpdatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup)) } else { // frontend overhead - *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, taskExecutions[0].Closure.CreatedAt, nodeSetup)) + *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), taskExecutions[0].GetClosure().GetCreatedAt(), nodeSetup)) // parse task executions parseTaskExecutions(taskExecutions, spans, depth) // backend overhead lastTask := taskExecutions[len(taskExecutions)-1] - if !nodeExecution.Closure.UpdatedAt.AsTime().Before(lastTask.Closure.UpdatedAt.AsTime()) { - *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].Closure.UpdatedAt, - nodeExecution.Closure.UpdatedAt, nodeTeardown)) + if !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(lastTask.GetClosure().GetUpdatedAt().AsTime()) { + *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].GetClosure().GetUpdatedAt(), + nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown)) } } @@ -645,13 +645,13 @@ func (m *MetricsManager) GetExecutionMetrics(ctx context.Context, request *admin.WorkflowExecutionGetMetricsRequest) (*admin.WorkflowExecutionGetMetricsResponse, error) { // retrieve workflow execution - executionRequest := &admin.WorkflowExecutionGetRequest{Id: request.Id} + executionRequest := &admin.WorkflowExecutionGetRequest{Id: request.GetId()} execution, err := m.executionManager.GetExecution(ctx, executionRequest) if err != nil { return nil, err } - span, err := m.parseExecution(ctx, execution, int(request.Depth)) + span, err := m.parseExecution(ctx, execution, int(request.GetDepth())) if err != nil { return nil, err } diff --git a/flyteadmin/pkg/manager/impl/metrics_manager_test.go b/flyteadmin/pkg/manager/impl/metrics_manager_test.go index e9392be8d9..b99e0d3243 100644 --- a/flyteadmin/pkg/manager/impl/metrics_manager_test.go +++ b/flyteadmin/pkg/manager/impl/metrics_manager_test.go @@ -28,8 +28,8 @@ var ( func addTimestamp(ts *timestamp.Timestamp, seconds int64) *timestamp.Timestamp { return &timestamp.Timestamp{ - Seconds: ts.Seconds + seconds, - Nanos: ts.Nanos, + Seconds: ts.GetSeconds() + seconds, + Nanos: ts.GetNanos(), } } @@ -89,10 +89,10 @@ func parseSpans(spans []*core.Span) (map[string][]int64, int) { operationDurations := make(map[string][]int64) referenceCount := 0 for _, span := range spans { - switch id := span.Id.(type) { + switch id := span.GetId().(type) { case *core.Span_OperationId: operationID := id.OperationId - duration := span.EndTime.Seconds - span.StartTime.Seconds + duration := span.GetEndTime().GetSeconds() - span.GetStartTime().GetSeconds() if array, exists := operationDurations[operationID]; exists { operationDurations[operationID] = append(array, duration) } else { @@ -907,11 +907,11 @@ func TestParseTaskExecution(t *testing.T) { t.Run(test.name, func(t *testing.T) { // parse task execution span := parseTaskExecution(test.taskExecution) - _, ok := span.Id.(*core.Span_TaskId) + _, ok := span.GetId().(*core.Span_TaskId) assert.True(t, ok) // validate spans - operationDurations, referenceCount := parseSpans(span.Spans) + operationDurations, referenceCount := parseSpans(span.GetSpans()) assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) assert.Equal(t, 0, referenceCount) }) diff --git a/flyteadmin/pkg/manager/impl/named_entity_manager.go
b/flyteadmin/pkg/manager/impl/named_entity_manager.go index 883948318a..a8ab24261e 100644 --- a/flyteadmin/pkg/manager/impl/named_entity_manager.go +++ b/flyteadmin/pkg/manager/impl/named_entity_manager.go @@ -41,10 +41,10 @@ func (m *NamedEntityManager) UpdateNamedEntity(ctx context.Context, request *adm logger.Debugf(ctx, "invalid request [%+v]: %v", request, err) return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain()) // Ensure entity exists before trying to update it - _, err := util.GetNamedEntity(ctx, m.db, request.ResourceType, request.Id) + _, err := util.GetNamedEntity(ctx, m.db, request.GetResourceType(), request.GetId()) if err != nil { return nil, err } @@ -52,7 +52,7 @@ func (m *NamedEntityManager) UpdateNamedEntity(ctx context.Context, request *adm metadataModel := transformers.CreateNamedEntityModel(request) err = m.db.NamedEntityRepo().Update(ctx, metadataModel) if err != nil { - logger.Debugf(ctx, "Failed to update named_entity for [%+v] with err %v", request.Id, err) + logger.Debugf(ctx, "Failed to update named_entity for [%+v] with err %v", request.GetId(), err) return nil, err } return &admin.NamedEntityUpdateResponse{}, nil @@ -64,8 +64,8 @@ func (m *NamedEntityManager) GetNamedEntity(ctx context.Context, request *admin. logger.Debugf(ctx, "invalid request [%+v]: %v", request, err) return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) - return util.GetNamedEntity(ctx, m.db, request.ResourceType, request.Id) + ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain()) + return util.GetNamedEntity(ctx, m.db, request.GetResourceType(), request.GetId()) } func (m *NamedEntityManager) getQueryFilters(requestFilters string) ([]common.InlineFilter, error) { @@ -98,51 +98,51 @@ func (m *NamedEntityManager) ListNamedEntities(ctx context.Context, request *adm logger.Debugf(ctx, "invalid request [%+v]: %v", request, err) return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain()) - if len(request.Filters) == 0 { + if len(request.GetFilters()) == 0 { // Add implicit filter to exclude system generated workflows request.Filters = fmt.Sprintf("not_like(name,%s)", ".flytegen%") } // HACK: In order to filter by state (if requested) - we need to amend the filter to use COALESCE // e.g. eq(state, 1) becomes 'WHERE (COALESCE(state, 0) = '1')' since not every NamedEntity necessarily // has an entry, and therefore the default state value '0' (active), should be assumed. 
- filters, err := m.getQueryFilters(request.Filters) + filters, err := m.getQueryFilters(request.GetFilters()) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.NamedEntityColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.NamedEntityColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListNamedEntities", request.Token) + "invalid pagination token %s for ListNamedEntities", request.GetToken()) } listInput := repoInterfaces.ListNamedEntityInput{ ListResourceInput: repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, }, - Project: request.Project, - Domain: request.Domain, - ResourceType: request.ResourceType, + Project: request.GetProject(), + Domain: request.GetDomain(), + ResourceType: request.GetResourceType(), } output, err := m.db.NamedEntityRepo().List(ctx, listInput) if err != nil { logger.Debugf(ctx, "Failed to list named entities of type: %s with project: %s, domain: %s. Returned error was: %v", - request.ResourceType, request.Project, request.Domain, err) + request.GetResourceType(), request.GetProject(), request.GetDomain(), err) return nil, err } var token string - if len(output.Entities) == int(request.Limit) { + if len(output.Entities) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(output.Entities)) } entities := transformers.FromNamedEntityModels(output.Entities) diff --git a/flyteadmin/pkg/manager/impl/node_execution_manager.go b/flyteadmin/pkg/manager/impl/node_execution_manager.go index 2f0f60977c..82e51bec9b 100644 --- a/flyteadmin/pkg/manager/impl/node_execution_manager.go +++ b/flyteadmin/pkg/manager/impl/node_execution_manager.go @@ -72,30 +72,30 @@ var isParent = common.NewMapFilter(map[string]interface{}{ }) func getNodeExecutionContext(ctx context.Context, identifier *core.NodeExecutionIdentifier) context.Context { - ctx = contextutils.WithProjectDomain(ctx, identifier.ExecutionId.Project, identifier.ExecutionId.Domain) - ctx = contextutils.WithExecutionID(ctx, identifier.ExecutionId.Name) - return contextutils.WithNodeID(ctx, identifier.NodeId) + ctx = contextutils.WithProjectDomain(ctx, identifier.GetExecutionId().GetProject(), identifier.GetExecutionId().GetDomain()) + ctx = contextutils.WithExecutionID(ctx, identifier.GetExecutionId().GetName()) + return contextutils.WithNodeID(ctx, identifier.GetNodeId()) } func (m *NodeExecutionManager) createNodeExecutionWithEvent( ctx context.Context, request *admin.NodeExecutionEventRequest, dynamicWorkflowRemoteClosureReference string) error { var parentTaskExecutionID *uint - if request.Event.ParentTaskMetadata != nil { - taskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.Event.ParentTaskMetadata.Id) + if request.GetEvent().GetParentTaskMetadata() != nil { + taskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.GetEvent().GetParentTaskMetadata().GetId()) if err != nil { return err } parentTaskExecutionID = &taskExecutionModel.ID } var parentID *uint - if request.Event.ParentNodeMetadata != nil { + if request.GetEvent().GetParentNodeMetadata() != nil { parentNodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, 
&core.NodeExecutionIdentifier{ - ExecutionId: request.Event.Id.ExecutionId, - NodeId: request.Event.ParentNodeMetadata.NodeId, + ExecutionId: request.GetEvent().GetId().GetExecutionId(), + NodeId: request.GetEvent().GetParentNodeMetadata().GetNodeId(), }) if err != nil { logger.Errorf(ctx, "failed to fetch node execution for the parent node: %v %s with err", - request.Event.Id.ExecutionId, request.Event.ParentNodeMetadata.NodeId, err) + request.GetEvent().GetId().GetExecutionId(), request.GetEvent().GetParentNodeMetadata().GetNodeId(), err) return err } parentID = &parentNodeExecutionModel.ID @@ -110,12 +110,12 @@ func (m *NodeExecutionManager) createNodeExecutionWithEvent( }) if err != nil { logger.Debugf(ctx, "failed to create node execution model for event request: %s with err: %v", - request.RequestId, err) + request.GetRequestId(), err) return err } if err := m.db.NodeExecutionRepo().Create(ctx, nodeExecutionModel); err != nil { logger.Debugf(ctx, "Failed to create node execution with id [%+v] and model [%+v] "+ - "with err %v", request.Event.Id, nodeExecutionModel, err) + "with err %v", request.GetEvent().GetId(), nodeExecutionModel, err) return err } m.metrics.ClosureSizeBytes.Observe(float64(len(nodeExecutionModel.Closure))) @@ -127,21 +127,21 @@ func (m *NodeExecutionManager) updateNodeExecutionWithEvent( dynamicWorkflowRemoteClosureReference string) (updateNodeExecutionStatus, error) { // If we have an existing execution, check if the phase change is valid nodeExecPhase := core.NodeExecution_Phase(core.NodeExecution_Phase_value[nodeExecutionModel.Phase]) - if nodeExecPhase == request.Event.Phase { - logger.Debugf(ctx, "This phase was already recorded %v for %+v", nodeExecPhase.String(), request.Event.Id) + if nodeExecPhase == request.GetEvent().GetPhase() { + logger.Debugf(ctx, "This phase was already recorded %v for %+v", nodeExecPhase.String(), request.GetEvent().GetId()) return updateFailed, errors.NewFlyteAdminErrorf(codes.AlreadyExists, - "This phase was already recorded %v for %+v", nodeExecPhase.String(), request.Event.Id) + "This phase was already recorded %v for %+v", nodeExecPhase.String(), request.GetEvent().GetId()) } else if common.IsNodeExecutionTerminal(nodeExecPhase) { // Cannot go from a terminal state to anything else logger.Warnf(ctx, "Invalid phase change from %v to %v for node execution %v", - nodeExecPhase.String(), request.Event.Phase.String(), request.Event.Id) + nodeExecPhase.String(), request.GetEvent().GetPhase().String(), request.GetEvent().GetId()) return alreadyInTerminalStatus, nil } // if this node execution kicked off a workflow, validate that the execution exists var childExecutionID *core.WorkflowExecutionIdentifier - if request.Event.GetWorkflowNodeMetadata() != nil { - childExecutionID = request.Event.GetWorkflowNodeMetadata().ExecutionId + if request.GetEvent().GetWorkflowNodeMetadata() != nil { + childExecutionID = request.GetEvent().GetWorkflowNodeMetadata().GetExecutionId() err := validation.ValidateWorkflowExecutionIdentifier(childExecutionID) if err != nil { logger.Errorf(ctx, "Invalid execution ID: %s with err: %v", @@ -158,13 +158,13 @@ func (m *NodeExecutionManager) updateNodeExecutionWithEvent( dynamicWorkflowRemoteClosureReference, m.config.ApplicationConfiguration().GetRemoteDataConfig().InlineEventDataPolicy, m.storageClient) if err != nil { - logger.Debugf(ctx, "failed to update node execution model: %+v with err: %v", request.Event.Id, err) + logger.Debugf(ctx, "failed to update node execution model: %+v with err: %v", 
request.GetEvent().GetId(), err) return updateFailed, err } err = m.db.NodeExecutionRepo().Update(ctx, nodeExecutionModel) if err != nil { logger.Debugf(ctx, "Failed to update node execution with id [%+v] with err %v", - request.Event.Id, err) + request.GetEvent().GetId(), err) return updateFailed, err } @@ -172,17 +172,17 @@ func (m *NodeExecutionManager) updateNodeExecutionWithEvent( } func formatDynamicWorkflowID(identifier *core.Identifier) string { - return fmt.Sprintf("%s_%s_%s_%s", identifier.Project, identifier.Domain, identifier.Name, identifier.Version) + return fmt.Sprintf("%s_%s_%s_%s", identifier.GetProject(), identifier.GetDomain(), identifier.GetName(), identifier.GetVersion()) } func (m *NodeExecutionManager) uploadDynamicWorkflowClosure( ctx context.Context, nodeID *core.NodeExecutionIdentifier, workflowID *core.Identifier, compiledWorkflowClosure *core.CompiledWorkflowClosure) (storage.DataReference, error) { nestedSubKeys := []string{ - nodeID.ExecutionId.Project, - nodeID.ExecutionId.Domain, - nodeID.ExecutionId.Name, - nodeID.NodeId, + nodeID.GetExecutionId().GetProject(), + nodeID.GetExecutionId().GetDomain(), + nodeID.GetExecutionId().GetName(), + nodeID.GetNodeId(), formatDynamicWorkflowID(workflowID), } nestedKeys := append(m.storagePrefix, nestedSubKeys...) @@ -204,17 +204,17 @@ func (m *NodeExecutionManager) uploadDynamicWorkflowClosure( func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *admin.NodeExecutionEventRequest) ( *admin.NodeExecutionEventResponse, error) { if err := validation.ValidateNodeExecutionEventRequest(request, m.config.ApplicationConfiguration().GetRemoteDataConfig().MaxSizeInBytes); err != nil { - logger.Debugf(ctx, "CreateNodeEvent called with invalid identifier [%+v]: %v", request.Event.Id, err) + logger.Debugf(ctx, "CreateNodeEvent called with invalid identifier [%+v]: %v", request.GetEvent().GetId(), err) } - ctx = getNodeExecutionContext(ctx, request.Event.Id) + ctx = getNodeExecutionContext(ctx, request.GetEvent().GetId()) logger.Debugf(ctx, "Received node execution event for Node Exec Id [%+v] transitioning to phase [%v], w/ Metadata [%v]", - request.Event.Id, request.Event.Phase, request.Event.ParentTaskMetadata) + request.GetEvent().GetId(), request.GetEvent().GetPhase(), request.GetEvent().GetParentTaskMetadata()) - executionID := request.Event.Id.ExecutionId + executionID := request.GetEvent().GetId().GetExecutionId() workflowExecution, err := m.db.ExecutionRepo().Get(ctx, repoInterfaces.Identifier{ - Project: executionID.Project, - Domain: executionID.Domain, - Name: executionID.Name, + Project: executionID.GetProject(), + Domain: executionID.GetDomain(), + Name: executionID.GetName(), }) if err != nil { m.metrics.MissingWorkflowExecution.Inc() @@ -228,15 +228,15 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *adm return nil, fmt.Errorf("failed to get existing execution id: [%+v]", executionID) } - if err := validation.ValidateCluster(ctx, workflowExecution.Cluster, request.Event.ProducerId); err != nil { + if err := validation.ValidateCluster(ctx, workflowExecution.Cluster, request.GetEvent().GetProducerId()); err != nil { return nil, err } var dynamicWorkflowRemoteClosureReference string - if request.Event.GetTaskNodeMetadata() != nil && request.Event.GetTaskNodeMetadata().DynamicWorkflow != nil { + if request.GetEvent().GetTaskNodeMetadata() != nil && request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow() != nil { dynamicWorkflowRemoteClosureDataReference, err := 
m.uploadDynamicWorkflowClosure( - ctx, request.Event.Id, request.Event.GetTaskNodeMetadata().DynamicWorkflow.Id, - request.Event.GetTaskNodeMetadata().DynamicWorkflow.CompiledWorkflow) + ctx, request.GetEvent().GetId(), request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow().GetId(), + request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow().GetCompiledWorkflow()) if err != nil { return nil, err } @@ -244,12 +244,12 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *adm } nodeExecutionModel, err := m.db.NodeExecutionRepo().Get(ctx, repoInterfaces.NodeExecutionResource{ - NodeExecutionIdentifier: request.Event.Id, + NodeExecutionIdentifier: request.GetEvent().GetId(), }) if err != nil { if err.(errors.FlyteAdminError).Code() != codes.NotFound { logger.Debugf(ctx, "Failed to retrieve existing node execution with id [%+v] with err: %v", - request.Event.Id, err) + request.GetEvent().GetId(), err) return nil, err } err = m.createNodeExecutionWithEvent(ctx, request, dynamicWorkflowRemoteClosureReference) @@ -265,33 +265,33 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *adm } if updateStatus == alreadyInTerminalStatus { - curPhase := request.Event.Phase.String() + curPhase := request.GetEvent().GetPhase().String() errorMsg := fmt.Sprintf("Invalid phase change from %s to %s for node execution %v", phase.String(), curPhase, nodeExecutionModel.ID) return nil, errors.NewAlreadyInTerminalStateError(ctx, errorMsg, curPhase) } } m.dbEventWriter.Write(request) - if request.Event.Phase == core.NodeExecution_RUNNING { + if request.GetEvent().GetPhase() == core.NodeExecution_RUNNING { m.metrics.ActiveNodeExecutions.Inc() - } else if common.IsNodeExecutionTerminal(request.Event.Phase) { + } else if common.IsNodeExecutionTerminal(request.GetEvent().GetPhase()) { m.metrics.ActiveNodeExecutions.Dec() - m.metrics.NodeExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.Event.Phase.String())) - if request.Event.GetOutputData() != nil { - m.metrics.NodeExecutionOutputBytes.Observe(float64(proto.Size(request.Event.GetOutputData()))) + m.metrics.NodeExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.GetEvent().GetPhase().String())) + if request.GetEvent().GetOutputData() != nil { + m.metrics.NodeExecutionOutputBytes.Observe(float64(proto.Size(request.GetEvent().GetOutputData()))) } } m.metrics.NodeExecutionEventsCreated.Inc() if err := m.eventPublisher.Publish(ctx, proto.MessageName(request), request); err != nil { m.metrics.PublishEventError.Inc() - logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.RequestId, err) + logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.GetRequestId(), err) } go func() { ceCtx := context.TODO() if err := m.cloudEventPublisher.Publish(ceCtx, proto.MessageName(request), request); err != nil { - logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.RequestId, err) + logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.GetRequestId(), err) } }() @@ -299,15 +299,15 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *adm } func (m *NodeExecutionManager) GetDynamicNodeWorkflow(ctx context.Context, request *admin.GetDynamicNodeWorkflowRequest) (*admin.DynamicNodeWorkflowResponse, error) { - if err := validation.ValidateNodeExecutionIdentifier(request.Id); err != nil { - logger.Debugf(ctx, "can't get node execution data with invalid identifier [%+v]: %v", request.Id, err) + if err := 
validation.ValidateNodeExecutionIdentifier(request.GetId()); err != nil { + logger.Debugf(ctx, "can't get node execution data with invalid identifier [%+v]: %v", request.GetId(), err) } - ctx = getNodeExecutionContext(ctx, request.Id) - nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.Id) + ctx = getNodeExecutionContext(ctx, request.GetId()) + nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.GetId()) if err != nil { logger.Errorf(ctx, "failed to get node execution with id [%+v] with err %v", - request.Id, err) + request.GetId(), err) return nil, err } @@ -331,7 +331,7 @@ func (m *NodeExecutionManager) transformNodeExecutionModel(ctx context.Context, if err != nil { return nil, err } - if internalData.EventVersion == 0 { + if internalData.GetEventVersion() == 0 { // Issue more expensive query to determine whether this node is a parent and/or dynamic node. nodeExecutionModel, err = m.db.NodeExecutionRepo().GetWithChildren(ctx, repoInterfaces.NodeExecutionResource{ NodeExecutionIdentifier: nodeExecutionID, @@ -370,17 +370,17 @@ func (m *NodeExecutionManager) transformNodeExecutionModelList(ctx context.Conte func (m *NodeExecutionManager) GetNodeExecution( ctx context.Context, request *admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) { - if err := validation.ValidateNodeExecutionIdentifier(request.Id); err != nil { - logger.Debugf(ctx, "get node execution called with invalid identifier [%+v]: %v", request.Id, err) + if err := validation.ValidateNodeExecutionIdentifier(request.GetId()); err != nil { + logger.Debugf(ctx, "get node execution called with invalid identifier [%+v]: %v", request.GetId(), err) } - ctx = getNodeExecutionContext(ctx, request.Id) - nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.Id) + ctx = getNodeExecutionContext(ctx, request.GetId()) + nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.GetId()) if err != nil { logger.Debugf(ctx, "Failed to get node execution with id [%+v] with err %v", - request.Id, err) + request.GetId(), err) return nil, err } - nodeExecution, err := m.transformNodeExecutionModel(ctx, *nodeExecutionModel, request.Id, nil) + nodeExecution, err := m.transformNodeExecutionModel(ctx, *nodeExecutionModel, request.GetId(), nil) if err != nil { return nil, err } @@ -448,17 +448,17 @@ func (m *NodeExecutionManager) ListNodeExecutions( if err := validation.ValidateNodeExecutionListRequest(request); err != nil { return nil, err } - ctx = getExecutionContext(ctx, request.WorkflowExecutionId) + ctx = getExecutionContext(ctx, request.GetWorkflowExecutionId()) - identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.WorkflowExecutionId, common.NodeExecution) + identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.GetWorkflowExecutionId(), common.NodeExecution) if err != nil { return nil, err } var mapFilters []common.MapFilter - if request.UniqueParentId != "" { + if request.GetUniqueParentId() != "" { parentNodeExecution, err := util.GetNodeExecutionModel(ctx, m.db, &core.NodeExecutionIdentifier{ - ExecutionId: request.WorkflowExecutionId, - NodeId: request.UniqueParentId, + ExecutionId: request.GetWorkflowExecutionId(), + NodeId: request.GetUniqueParentId(), }) if err != nil { return nil, err @@ -475,7 +475,7 @@ func (m *NodeExecutionManager) ListNodeExecutions( } } return m.listNodeExecutions( - ctx, identifierFilters, request.Filters, request.Limit, request.Token, request.SortBy, mapFilters) + ctx, 
identifierFilters, request.GetFilters(), request.GetLimit(), request.GetToken(), request.GetSortBy(), mapFilters) } // Filters on node executions matching the execution parameters (execution project, domain, and name) as well as the @@ -486,13 +486,13 @@ func (m *NodeExecutionManager) ListNodeExecutionsForTask( if err := validation.ValidateNodeExecutionForTaskListRequest(request); err != nil { return nil, err } - ctx = getTaskExecutionContext(ctx, request.TaskExecutionId) + ctx = getTaskExecutionContext(ctx, request.GetTaskExecutionId()) identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters( - ctx, request.TaskExecutionId.NodeExecutionId.ExecutionId, common.NodeExecution) + ctx, request.GetTaskExecutionId().GetNodeExecutionId().GetExecutionId(), common.NodeExecution) if err != nil { return nil, err } - parentTaskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.TaskExecutionId) + parentTaskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.GetTaskExecutionId()) if err != nil { return nil, err } @@ -503,26 +503,26 @@ func (m *NodeExecutionManager) ListNodeExecutionsForTask( } identifierFilters = append(identifierFilters, nodeIDFilter) return m.listNodeExecutions( - ctx, identifierFilters, request.Filters, request.Limit, request.Token, request.SortBy, nil) + ctx, identifierFilters, request.GetFilters(), request.GetLimit(), request.GetToken(), request.GetSortBy(), nil) } func (m *NodeExecutionManager) GetNodeExecutionData( ctx context.Context, request *admin.NodeExecutionGetDataRequest) (*admin.NodeExecutionGetDataResponse, error) { - if err := validation.ValidateNodeExecutionIdentifier(request.Id); err != nil { - logger.Debugf(ctx, "can't get node execution data with invalid identifier [%+v]: %v", request.Id, err) + if err := validation.ValidateNodeExecutionIdentifier(request.GetId()); err != nil { + logger.Debugf(ctx, "can't get node execution data with invalid identifier [%+v]: %v", request.GetId(), err) } - ctx = getNodeExecutionContext(ctx, request.Id) - nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.Id) + ctx = getNodeExecutionContext(ctx, request.GetId()) + nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.GetId()) if err != nil { logger.Debugf(ctx, "Failed to get node execution with id [%+v] with err %v", - request.Id, err) + request.GetId(), err) return nil, err } nodeExecution, err := transformers.FromNodeExecutionModel(*nodeExecutionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { - logger.Debugf(ctx, "failed to transform node execution model [%+v] when fetching data: %v", request.Id, err) + logger.Debugf(ctx, "failed to transform node execution model [%+v] when fetching data: %v", request.GetId(), err) return nil, err } @@ -532,7 +532,7 @@ func (m *NodeExecutionManager) GetNodeExecutionData( group.Go(func() error { var err error inputs, inputURLBlob, err = util.GetInputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, nodeExecution.InputUri) + m.storageClient, nodeExecution.GetInputUri()) return err }) @@ -541,7 +541,7 @@ func (m *NodeExecutionManager) GetNodeExecutionData( group.Go(func() error { var err error outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, nodeExecution.Closure) + m.storageClient, nodeExecution.GetClosure()) return err }) @@ -555,7 +555,7 @@ func (m *NodeExecutionManager) GetNodeExecutionData( 
Outputs: outputURLBlob, FullInputs: inputs, FullOutputs: outputs, - FlyteUrls: common.FlyteURLsFromNodeExecutionID(request.Id, nodeExecution.GetClosure() != nil && nodeExecution.GetClosure().GetDeckUri() != ""), + FlyteUrls: common.FlyteURLsFromNodeExecutionID(request.GetId(), nodeExecution.GetClosure() != nil && nodeExecution.GetClosure().GetDeckUri() != ""), } if len(nodeExecutionModel.DynamicWorkflowRemoteClosureReference) > 0 { @@ -565,17 +565,17 @@ func (m *NodeExecutionManager) GetNodeExecutionData( } response.DynamicWorkflow = &admin.DynamicWorkflowNodeMetadata{ - Id: closure.Primary.Template.Id, + Id: closure.GetPrimary().GetTemplate().GetId(), CompiledWorkflow: closure, - DynamicJobSpecUri: nodeExecution.Closure.DynamicJobSpecUri, + DynamicJobSpecUri: nodeExecution.GetClosure().GetDynamicJobSpecUri(), } } - m.metrics.NodeExecutionInputBytes.Observe(float64(response.Inputs.Bytes)) - if response.Outputs.Bytes > 0 { - m.metrics.NodeExecutionOutputBytes.Observe(float64(response.Outputs.Bytes)) - } else if response.FullOutputs != nil { - m.metrics.NodeExecutionOutputBytes.Observe(float64(proto.Size(response.FullOutputs))) + m.metrics.NodeExecutionInputBytes.Observe(float64(response.GetInputs().GetBytes())) + if response.GetOutputs().GetBytes() > 0 { + m.metrics.NodeExecutionOutputBytes.Observe(float64(response.GetOutputs().GetBytes())) + } else if response.GetFullOutputs() != nil { + m.metrics.NodeExecutionOutputBytes.Observe(float64(proto.Size(response.GetFullOutputs()))) } return response, nil @@ -588,9 +588,9 @@ func (m *NodeExecutionManager) fetchDynamicWorkflowClosure(ctx context.Context, return nil, errors.NewFlyteAdminErrorf(codes.Internal, "Unable to read WorkflowClosure from location %s : %v", ref, err) } - if wf := closure.Primary; wf == nil { + if wf := closure.GetPrimary(); wf == nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "Empty primary workflow definition in loaded dynamic workflow model.") - } else if template := wf.Template; template == nil { + } else if template := wf.GetTemplate(); template == nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "Empty primary workflow template in loaded dynamic workflow model.") } return closure, nil diff --git a/flyteadmin/pkg/manager/impl/node_execution_manager_test.go b/flyteadmin/pkg/manager/impl/node_execution_manager_test.go index b43c785b33..69a0203452 100644 --- a/flyteadmin/pkg/manager/impl/node_execution_manager_test.go +++ b/flyteadmin/pkg/manager/impl/node_execution_manager_test.go @@ -71,7 +71,7 @@ var request = &admin.NodeExecutionEventRequest{ TargetMetadata: &event.NodeExecutionEvent_TaskNodeMetadata{ TaskNodeMetadata: &event.TaskNodeMetadata{ DynamicWorkflow: &event.DynamicWorkflowNodeMetadata{ - Id: dynamicWorkflowClosure.Primary.Template.Id, + Id: dynamicWorkflowClosure.GetPrimary().GetTemplate().GetId(), CompiledWorkflow: dynamicWorkflowClosure, }, }, @@ -131,7 +131,7 @@ func TestCreateNodeEvent(t *testing.T) { return models.NodeExecution{}, flyteAdminErrors.NewFlyteAdminError(codes.NotFound, "foo") }) expectedClosure := admin.NodeExecutionClosure{ - Phase: request.Event.Phase, + Phase: request.GetEvent().GetPhase(), StartedAt: occurredAtProto, CreatedAt: occurredAtProto, UpdatedAt: occurredAtProto, @@ -450,8 +450,8 @@ func TestTransformNodeExecutionModel(t *testing.T) { } nodeExecution, err := manager.transformNodeExecutionModel(ctx, models.NodeExecution{}, nodeExecID, transformers.DefaultExecutionTransformerOptions) assert.NoError(t, err) - assert.True(t, proto.Equal(nodeExecID, 
nodeExecution.Id)) - assert.True(t, nodeExecution.Metadata.IsParentNode) + assert.True(t, proto.Equal(nodeExecID, nodeExecution.GetId())) + assert.True(t, nodeExecution.GetMetadata().GetIsParentNode()) }) t.Run("event version > 0", func(t *testing.T) { manager := NodeExecutionManager{ @@ -480,8 +480,8 @@ func TestTransformNodeExecutionModel(t *testing.T) { InternalData: internalDataBytes, }, nodeExecID, transformers.DefaultExecutionTransformerOptions) assert.NoError(t, err) - assert.True(t, nodeExecution.Metadata.IsParentNode) - assert.True(t, nodeExecution.Metadata.IsDynamic) + assert.True(t, nodeExecution.GetMetadata().GetIsParentNode()) + assert.True(t, nodeExecution.GetMetadata().GetIsDynamic()) }) t.Run("transform internal data err", func(t *testing.T) { manager := NodeExecutionManager{ @@ -865,7 +865,7 @@ func TestListNodeExecutionsLevelZero(t *testing.T) { }, }) assert.NoError(t, err) - assert.Len(t, nodeExecutions.NodeExecutions, 1) + assert.Len(t, nodeExecutions.GetNodeExecutions(), 1) assert.True(t, proto.Equal(&admin.NodeExecution{ Id: &core.NodeExecutionIdentifier{ NodeId: "node id", @@ -878,8 +878,8 @@ func TestListNodeExecutionsLevelZero(t *testing.T) { InputUri: "input uri", Closure: &expectedClosure, Metadata: &expectedMetadata, - }, nodeExecutions.NodeExecutions[0])) - assert.Equal(t, "3", nodeExecutions.Token) + }, nodeExecutions.GetNodeExecutions()[0])) + assert.Equal(t, "3", nodeExecutions.GetToken()) } func TestListNodeExecutionsWithParent(t *testing.T) { @@ -895,7 +895,7 @@ func TestListNodeExecutionsWithParent(t *testing.T) { closureBytes, _ := proto.Marshal(&expectedClosure) parentID := uint(12) repository.NodeExecutionRepo().(*repositoryMocks.MockNodeExecutionRepo).SetGetCallback(func(ctx context.Context, input interfaces.NodeExecutionResource) (execution models.NodeExecution, e error) { - assert.Equal(t, "parent_1", input.NodeExecutionIdentifier.NodeId) + assert.Equal(t, "parent_1", input.NodeExecutionIdentifier.GetNodeId()) return models.NodeExecution{ BaseModel: models.BaseModel{ ID: parentID, @@ -966,7 +966,7 @@ func TestListNodeExecutionsWithParent(t *testing.T) { UniqueParentId: "parent_1", }) assert.Nil(t, err) - assert.Len(t, nodeExecutions.NodeExecutions, 1) + assert.Len(t, nodeExecutions.GetNodeExecutions(), 1) assert.True(t, proto.Equal(&admin.NodeExecution{ Id: &core.NodeExecutionIdentifier{ NodeId: "node id", @@ -979,8 +979,8 @@ func TestListNodeExecutionsWithParent(t *testing.T) { InputUri: "input uri", Closure: &expectedClosure, Metadata: &expectedMetadata, - }, nodeExecutions.NodeExecutions[0])) - assert.Equal(t, "3", nodeExecutions.Token) + }, nodeExecutions.GetNodeExecutions()[0])) + assert.Equal(t, "3", nodeExecutions.GetToken()) } func TestListNodeExecutions_WithJoinTableFilter(t *testing.T) { @@ -1089,7 +1089,7 @@ func TestListNodeExecutions_WithJoinTableFilter(t *testing.T) { Filters: "eq(execution.phase, SUCCEEDED)", }) assert.NoError(t, err) - assert.Len(t, nodeExecutions.NodeExecutions, 1) + assert.Len(t, nodeExecutions.GetNodeExecutions(), 1) assert.True(t, proto.Equal(&admin.NodeExecution{ Id: &core.NodeExecutionIdentifier{ NodeId: "node id", @@ -1102,8 +1102,8 @@ func TestListNodeExecutions_WithJoinTableFilter(t *testing.T) { InputUri: "input uri", Closure: &expectedClosure, Metadata: &expectedMetadata, - }, nodeExecutions.NodeExecutions[0])) - assert.Equal(t, "3", nodeExecutions.Token) + }, nodeExecutions.GetNodeExecutions()[0])) + assert.Equal(t, "3", nodeExecutions.GetToken()) } func TestListNodeExecutions_InvalidParams(t *testing.T) 
{ @@ -1316,7 +1316,7 @@ func TestListNodeExecutionsForTask(t *testing.T) { }, }) assert.Nil(t, err) - assert.Len(t, nodeExecutions.NodeExecutions, 1) + assert.Len(t, nodeExecutions.GetNodeExecutions(), 1) expectedMetadata := admin.NodeExecutionMetaData{ SpecNodeId: "spec-n1", IsParentNode: true, @@ -1333,8 +1333,8 @@ func TestListNodeExecutionsForTask(t *testing.T) { InputUri: "input uri", Closure: &expectedClosure, Metadata: &expectedMetadata, - }, nodeExecutions.NodeExecutions[0])) - assert.Equal(t, "3", nodeExecutions.Token) + }, nodeExecutions.GetNodeExecutions()[0])) + assert.Equal(t, "3", nodeExecutions.GetToken()) } func TestGetNodeExecutionData(t *testing.T) { @@ -1439,7 +1439,7 @@ func TestGetNodeExecutionData(t *testing.T) { FullInputs: fullInputs, FullOutputs: fullOutputs, DynamicWorkflow: &admin.DynamicWorkflowNodeMetadata{ - Id: dynamicWorkflowClosure.Primary.Template.Id, + Id: dynamicWorkflowClosure.GetPrimary().GetTemplate().GetId(), CompiledWorkflow: dynamicWorkflowClosure, }, FlyteUrls: &admin.FlyteURLs{ @@ -1465,7 +1465,7 @@ func Test_GetDynamicNodeWorkflow_Success(t *testing.T) { return models.NodeExecution{DynamicWorkflowRemoteClosureReference: remoteClosureIdentifier}, nil }) mockStorageClient := commonMocks.GetMockStorageClient() - expectedClosure := testutils.GetWorkflowClosure().CompiledWorkflow + expectedClosure := testutils.GetWorkflowClosure().GetCompiledWorkflow() mockStorageClient.ComposedProtobufStore.(*commonMocks.TestDataStore).ReadProtobufCb = func(ctx context.Context, reference storage.DataReference, msg proto.Message) error { assert.Equal(t, remoteClosureIdentifier, reference.String()) bytes, err := proto.Marshal(expectedClosure) diff --git a/flyteadmin/pkg/manager/impl/project_manager.go b/flyteadmin/pkg/manager/impl/project_manager.go index a1ac99b412..a19b61ca01 100644 --- a/flyteadmin/pkg/manager/impl/project_manager.go +++ b/flyteadmin/pkg/manager/impl/project_manager.go @@ -33,7 +33,7 @@ func (m *ProjectManager) CreateProject(ctx context.Context, request *admin.Proje if err := validation.ValidateProjectRegisterRequest(request); err != nil { return nil, err } - projectModel := transformers.CreateProjectModel(request.Project) + projectModel := transformers.CreateProjectModel(request.GetProject()) err := m.db.ProjectRepo().Create(ctx, projectModel) if err != nil { return nil, err @@ -44,14 +44,14 @@ func (m *ProjectManager) CreateProject(ctx context.Context, request *admin.Proje func (m *ProjectManager) ListProjects(ctx context.Context, request *admin.ProjectListRequest) (*admin.Projects, error) { spec := util.FilterSpec{ - RequestFilters: request.Filters, + RequestFilters: request.GetFilters(), } filters, err := util.GetDbFilters(spec, common.Project) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.ProjectColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.ProjectColumns) if err != nil { return nil, err } @@ -59,14 +59,14 @@ func (m *ProjectManager) ListProjects(ctx context.Context, request *admin.Projec sortParameter = alphabeticalSortParam } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListProjects", request.Token) + "invalid pagination token %s for ListProjects", request.GetToken()) } // And finally, query the database listProjectsInput := repoInterfaces.ListResourceInput{ - 
Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, @@ -75,10 +75,10 @@ func (m *ProjectManager) ListProjects(ctx context.Context, request *admin.Projec if err != nil { return nil, err } - projects := transformers.FromProjectModels(projectModels, m.GetDomains(ctx, &admin.GetDomainRequest{}).Domains) + projects := transformers.FromProjectModels(projectModels, m.GetDomains(ctx, &admin.GetDomainRequest{}).GetDomains()) var token string - if len(projects) == int(request.Limit) { + if len(projects) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(projects)) } @@ -93,7 +93,7 @@ func (m *ProjectManager) UpdateProject(ctx context.Context, projectUpdate *admin projectRepo := m.db.ProjectRepo() // Fetch the existing project if exists. If not, return err and do not update. - _, err := projectRepo.Get(ctx, projectUpdate.Id) + _, err := projectRepo.Get(ctx, projectUpdate.GetId()) if err != nil { return nil, err } @@ -118,11 +118,11 @@ func (m *ProjectManager) GetProject(ctx context.Context, request *admin.ProjectG if err := validation.ValidateProjectGetRequest(request); err != nil { return nil, err } - projectModel, err := m.db.ProjectRepo().Get(ctx, request.Id) + projectModel, err := m.db.ProjectRepo().Get(ctx, request.GetId()) if err != nil { return nil, err } - projectResponse := transformers.FromProjectModel(projectModel, m.GetDomains(ctx, &admin.GetDomainRequest{}).Domains) + projectResponse := transformers.FromProjectModel(projectModel, m.GetDomains(ctx, &admin.GetDomainRequest{}).GetDomains()) return projectResponse, nil } diff --git a/flyteadmin/pkg/manager/impl/project_manager_test.go b/flyteadmin/pkg/manager/impl/project_manager_test.go index 38117a7ec9..42bf93cafb 100644 --- a/flyteadmin/pkg/manager/impl/project_manager_test.go +++ b/flyteadmin/pkg/manager/impl/project_manager_test.go @@ -70,11 +70,11 @@ func testListProjects(request *admin.ProjectListRequest, token string, orderExpr resp, err := projectManager.ListProjects(context.Background(), request) assert.NoError(t, err) - assert.Len(t, resp.Projects, 1) + assert.Len(t, resp.GetProjects(), 1) assert.Equal(t, token, resp.GetToken()) - assert.Len(t, resp.Projects[0].Domains, 4) - for _, domain := range resp.Projects[0].Domains { - assert.Contains(t, testDomainsForProjManager, domain.Id) + assert.Len(t, resp.GetProjects()[0].GetDomains(), 4) + for _, domain := range resp.GetProjects()[0].GetDomains() { + assert.Contains(t, testDomainsForProjManager, domain.GetId()) } } @@ -300,10 +300,10 @@ func TestProjectManager_TestGetProject(t *testing.T) { resp, _ := projectManager.GetProject(context.Background(), mockedProject) - assert.Equal(t, mockedProject.Id, resp.Id) - assert.Equal(t, "a-mocked-project", resp.Name) - assert.Equal(t, "A mocked project", resp.Description) - assert.Equal(t, admin.Project_ProjectState(0), resp.State) + assert.Equal(t, mockedProject.GetId(), resp.GetId()) + assert.Equal(t, "a-mocked-project", resp.GetName()) + assert.Equal(t, "A mocked project", resp.GetDescription()) + assert.Equal(t, admin.Project_ProjectState(0), resp.GetState()) } func TestProjectManager_TestGetProject_ErrorDueToProjectNotFound(t *testing.T) { diff --git a/flyteadmin/pkg/manager/impl/resources/resource_manager.go b/flyteadmin/pkg/manager/impl/resources/resource_manager.go index b1304930cf..4dad45d987 100644 --- a/flyteadmin/pkg/manager/impl/resources/resource_manager.go +++ b/flyteadmin/pkg/manager/impl/resources/resource_manager.go @@ -76,7 +76,7 @@ 
func (m *ResourceManager) createOrMergeUpdateWorkflowAttributes( return nil, err } updatedModel, err := transformers.MergeUpdateWorkflowAttributes( - ctx, existing, resourceType, &resourceID, request.Attributes) + ctx, existing, resourceType, &resourceID, request.GetAttributes()) if err != nil { return nil, err } @@ -96,11 +96,11 @@ func (m *ResourceManager) UpdateWorkflowAttributes( return nil, err } - model, err := transformers.WorkflowAttributesToResourceModel(request.Attributes, resource) + model, err := transformers.WorkflowAttributesToResourceModel(request.GetAttributes(), resource) if err != nil { return nil, err } - if request.Attributes.GetMatchingAttributes().GetPluginOverrides() != nil { + if request.GetAttributes().GetMatchingAttributes().GetPluginOverrides() != nil { return m.createOrMergeUpdateWorkflowAttributes(ctx, request, model, admin.MatchableResource_PLUGIN_OVERRIDE) } err = m.db.ResourceRepo().CreateOrUpdate(ctx, model) @@ -118,7 +118,7 @@ func (m *ResourceManager) GetWorkflowAttributes( return nil, err } workflowAttributesModel, err := m.db.ResourceRepo().Get( - ctx, repo_interface.ResourceID{Project: request.Project, Domain: request.Domain, Workflow: request.Workflow, ResourceType: request.ResourceType.String()}) + ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: request.GetDomain(), Workflow: request.GetWorkflow(), ResourceType: request.GetResourceType().String()}) if err != nil { return nil, err } @@ -137,11 +137,11 @@ func (m *ResourceManager) DeleteWorkflowAttributes(ctx context.Context, return nil, err } if err := m.db.ResourceRepo().Delete( - ctx, repo_interface.ResourceID{Project: request.Project, Domain: request.Domain, Workflow: request.Workflow, ResourceType: request.ResourceType.String()}); err != nil { + ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: request.GetDomain(), Workflow: request.GetWorkflow(), ResourceType: request.GetResourceType().String()}); err != nil { return nil, err } - logger.Infof(ctx, "Deleted workflow attributes for: %s-%s-%s (%s)", request.Project, - request.Domain, request.Workflow, request.ResourceType.String()) + logger.Infof(ctx, "Deleted workflow attributes for: %s-%s-%s (%s)", request.GetProject(), + request.GetDomain(), request.GetWorkflow(), request.GetResourceType().String()) return &admin.WorkflowAttributesDeleteResponse{}, nil } @@ -154,12 +154,12 @@ func (m *ResourceManager) UpdateProjectAttributes(ctx context.Context, request * if resource, err = validation.ValidateProjectAttributesUpdateRequest(ctx, m.db, request); err != nil { return nil, err } - model, err := transformers.ProjectAttributesToResourceModel(request.Attributes, resource) + model, err := transformers.ProjectAttributesToResourceModel(request.GetAttributes(), resource) if err != nil { return nil, err } - if request.Attributes.GetMatchingAttributes().GetPluginOverrides() != nil { + if request.GetAttributes().GetMatchingAttributes().GetPluginOverrides() != nil { return m.createOrMergeUpdateProjectAttributes(ctx, request, model, admin.MatchableResource_PLUGIN_OVERRIDE) } @@ -174,12 +174,12 @@ func (m *ResourceManager) UpdateProjectAttributes(ctx context.Context, request * func (m *ResourceManager) GetProjectAttributesBase(ctx context.Context, request *admin.ProjectAttributesGetRequest) ( *admin.ProjectAttributesGetResponse, error) { - if err := validation.ValidateProjectExists(ctx, m.db, request.Project); err != nil { + if err := validation.ValidateProjectExists(ctx, m.db, request.GetProject()); err != nil { return 
nil, err } projectAttributesModel, err := m.db.ResourceRepo().GetProjectLevel( - ctx, repo_interface.ResourceID{Project: request.Project, Domain: "", ResourceType: request.ResourceType.String()}) + ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: "", ResourceType: request.GetResourceType().String()}) if err != nil { return nil, err } @@ -191,8 +191,8 @@ func (m *ResourceManager) GetProjectAttributesBase(ctx context.Context, request return &admin.ProjectAttributesGetResponse{ Attributes: &admin.ProjectAttributes{ - Project: request.Project, - MatchingAttributes: ma.Attributes, + Project: request.GetProject(), + MatchingAttributes: ma.GetAttributes(), }, }, nil } @@ -208,11 +208,11 @@ func (m *ResourceManager) GetProjectAttributes(ctx context.Context, request *adm configLevelDefaults := m.config.GetTopLevelConfig().GetAsWorkflowExecutionConfig() if err != nil { ec, ok := err.(errors.FlyteAdminError) - if ok && ec.Code() == codes.NotFound && request.ResourceType == admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG { + if ok && ec.Code() == codes.NotFound && request.GetResourceType() == admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG { // TODO: Will likely be removed after overarching settings project is done return &admin.ProjectAttributesGetResponse{ Attributes: &admin.ProjectAttributes{ - Project: request.Project, + Project: request.GetProject(), MatchingAttributes: &admin.MatchingAttributes{ Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ WorkflowExecutionConfig: configLevelDefaults, @@ -227,14 +227,14 @@ func (m *ResourceManager) GetProjectAttributes(ctx context.Context, request *adm // If found, then merge result with the default values for the platform // TODO: Remove this logic once the overarching settings project is done. Those endpoints should take // default configuration into account. 
- responseAttributes := getResponse.Attributes.GetMatchingAttributes().GetWorkflowExecutionConfig() + responseAttributes := getResponse.GetAttributes().GetMatchingAttributes().GetWorkflowExecutionConfig() if responseAttributes != nil { logger.Warningf(ctx, "Merging response %s with defaults %s", responseAttributes, configLevelDefaults) tmp := util.MergeIntoExecConfig(responseAttributes, configLevelDefaults) responseAttributes = tmp return &admin.ProjectAttributesGetResponse{ Attributes: &admin.ProjectAttributes{ - Project: request.Project, + Project: request.GetProject(), MatchingAttributes: &admin.MatchingAttributes{ Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ WorkflowExecutionConfig: responseAttributes, @@ -250,14 +250,14 @@ func (m *ResourceManager) GetProjectAttributes(ctx context.Context, request *adm func (m *ResourceManager) DeleteProjectAttributes(ctx context.Context, request *admin.ProjectAttributesDeleteRequest) ( *admin.ProjectAttributesDeleteResponse, error) { - if err := validation.ValidateProjectForUpdate(ctx, m.db, request.Project); err != nil { + if err := validation.ValidateProjectForUpdate(ctx, m.db, request.GetProject()); err != nil { return nil, err } if err := m.db.ResourceRepo().Delete( - ctx, repo_interface.ResourceID{Project: request.Project, ResourceType: request.ResourceType.String()}); err != nil { + ctx, repo_interface.ResourceID{Project: request.GetProject(), ResourceType: request.GetResourceType().String()}); err != nil { return nil, err } - logger.Infof(ctx, "Deleted project attributes for: %s-%s (%s)", request.Project, request.ResourceType.String()) + logger.Infof(ctx, "Deleted project attributes for: %s-%s (%s)", request.GetProject(), request.GetResourceType().String()) return &admin.ProjectAttributesDeleteResponse{}, nil } @@ -285,7 +285,7 @@ func (m *ResourceManager) createOrMergeUpdateProjectDomainAttributes( return nil, err } updatedModel, err := transformers.MergeUpdatePluginAttributes( - ctx, existing, resourceType, &resourceID, request.Attributes.MatchingAttributes) + ctx, existing, resourceType, &resourceID, request.GetAttributes().GetMatchingAttributes()) if err != nil { return nil, err } @@ -321,7 +321,7 @@ func (m *ResourceManager) createOrMergeUpdateProjectAttributes( return nil, err } updatedModel, err := transformers.MergeUpdatePluginAttributes( - ctx, existing, resourceType, &resourceID, request.Attributes.MatchingAttributes) + ctx, existing, resourceType, &resourceID, request.GetAttributes().GetMatchingAttributes()) if err != nil { return nil, err } @@ -340,13 +340,13 @@ func (m *ResourceManager) UpdateProjectDomainAttributes( if resource, err = validation.ValidateProjectDomainAttributesUpdateRequest(ctx, m.db, m.config, request); err != nil { return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Attributes.Project, request.Attributes.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetAttributes().GetProject(), request.GetAttributes().GetDomain()) - model, err := transformers.ProjectDomainAttributesToResourceModel(request.Attributes, resource) + model, err := transformers.ProjectDomainAttributesToResourceModel(request.GetAttributes(), resource) if err != nil { return nil, err } - if request.Attributes.GetMatchingAttributes().GetPluginOverrides() != nil { + if request.GetAttributes().GetMatchingAttributes().GetPluginOverrides() != nil { return m.createOrMergeUpdateProjectDomainAttributes(ctx, request, model, admin.MatchableResource_PLUGIN_OVERRIDE) } err = m.db.ResourceRepo().CreateOrUpdate(ctx, 
model) @@ -363,7 +363,7 @@ func (m *ResourceManager) GetProjectDomainAttributes( return nil, err } projectAttributesModel, err := m.db.ResourceRepo().Get( - ctx, repo_interface.ResourceID{Project: request.Project, Domain: request.Domain, ResourceType: request.ResourceType.String()}) + ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: request.GetDomain(), ResourceType: request.GetResourceType().String()}) if err != nil { return nil, err } @@ -382,11 +382,11 @@ func (m *ResourceManager) DeleteProjectDomainAttributes(ctx context.Context, return nil, err } if err := m.db.ResourceRepo().Delete( - ctx, repo_interface.ResourceID{Project: request.Project, Domain: request.Domain, ResourceType: request.ResourceType.String()}); err != nil { + ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: request.GetDomain(), ResourceType: request.GetResourceType().String()}); err != nil { return nil, err } - logger.Infof(ctx, "Deleted project-domain attributes for: %s-%s (%s)", request.Project, - request.Domain, request.ResourceType.String()) + logger.Infof(ctx, "Deleted project-domain attributes for: %s-%s (%s)", request.GetProject(), + request.GetDomain(), request.GetResourceType().String()) return &admin.ProjectDomainAttributesDeleteResponse{}, nil } @@ -395,7 +395,7 @@ func (m *ResourceManager) ListAll(ctx context.Context, request *admin.ListMatcha if err := validation.ValidateListAllMatchableAttributesRequest(request); err != nil { return nil, err } - resources, err := m.db.ResourceRepo().ListAll(ctx, request.ResourceType.String()) + resources, err := m.db.ResourceRepo().ListAll(ctx, request.GetResourceType().String()) if err != nil { return nil, err } diff --git a/flyteadmin/pkg/manager/impl/resources/resource_manager_test.go b/flyteadmin/pkg/manager/impl/resources/resource_manager_test.go index 8352de1d7b..be03d642ab 100644 --- a/flyteadmin/pkg/manager/impl/resources/resource_manager_test.go +++ b/flyteadmin/pkg/manager/impl/resources/resource_manager_test.go @@ -83,8 +83,8 @@ func TestUpdateWorkflowAttributes_CreateOrMerge(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 1) - assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().Overrides[0], &admin.PluginOverride{ + assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 1) + assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().GetOverrides()[0], &admin.PluginOverride{ TaskType: "python", PluginId: []string{"plugin a"}})) @@ -127,14 +127,14 @@ func TestUpdateWorkflowAttributes_CreateOrMerge(t *testing.T) { t.Fatal(err) } - assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 2) - for _, override := range attributesToBeSaved.GetPluginOverrides().Overrides { - if override.TaskType == python { - assert.EqualValues(t, []string{"plugin a"}, override.PluginId) - } else if override.TaskType == hive { - assert.EqualValues(t, []string{"plugin b"}, override.PluginId) + assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 2) + for _, override := range attributesToBeSaved.GetPluginOverrides().GetOverrides() { + if override.GetTaskType() == python { + assert.EqualValues(t, []string{"plugin a"}, override.GetPluginId()) + } else if override.GetTaskType() == hive { + assert.EqualValues(t, []string{"plugin b"}, override.GetPluginId()) } else { - t.Errorf("Unexpected task type [%s] plugin override committed to db", override.TaskType) + t.Errorf("Unexpected task type [%s] plugin override 
committed to db", override.GetTaskType()) } } createOrUpdateCalled = true @@ -256,8 +256,8 @@ func TestUpdateProjectDomainAttributes_CreateOrMerge(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 1) - assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().Overrides[0], &admin.PluginOverride{ + assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 1) + assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().GetOverrides()[0], &admin.PluginOverride{ TaskType: python, PluginId: []string{"plugin a"}})) @@ -298,14 +298,14 @@ func TestUpdateProjectDomainAttributes_CreateOrMerge(t *testing.T) { t.Fatal(err) } - assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 2) - for _, override := range attributesToBeSaved.GetPluginOverrides().Overrides { - if override.TaskType == python { - assert.EqualValues(t, []string{"plugin a"}, override.PluginId) - } else if override.TaskType == hive { - assert.EqualValues(t, []string{"plugin b"}, override.PluginId) + assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 2) + for _, override := range attributesToBeSaved.GetPluginOverrides().GetOverrides() { + if override.GetTaskType() == python { + assert.EqualValues(t, []string{"plugin a"}, override.GetPluginId()) + } else if override.GetTaskType() == hive { + assert.EqualValues(t, []string{"plugin b"}, override.GetPluginId()) } else { - t.Errorf("Unexpected task type [%s] plugin override committed to db", override.TaskType) + t.Errorf("Unexpected task type [%s] plugin override committed to db", override.GetTaskType()) } } createOrUpdateCalled = true @@ -439,8 +439,8 @@ func TestUpdateProjectAttributes_CreateOrMerge(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 1) - assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().Overrides[0], &admin.PluginOverride{ + assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 1) + assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().GetOverrides()[0], &admin.PluginOverride{ TaskType: python, PluginId: []string{"plugin a"}})) @@ -480,14 +480,14 @@ func TestUpdateProjectAttributes_CreateOrMerge(t *testing.T) { t.Fatal(err) } - assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 2) - for _, override := range attributesToBeSaved.GetPluginOverrides().Overrides { - if override.TaskType == python { - assert.EqualValues(t, []string{"plugin a"}, override.PluginId) - } else if override.TaskType == hive { - assert.EqualValues(t, []string{"plugin b"}, override.PluginId) + assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 2) + for _, override := range attributesToBeSaved.GetPluginOverrides().GetOverrides() { + if override.GetTaskType() == python { + assert.EqualValues(t, []string{"plugin a"}, override.GetPluginId()) + } else if override.GetTaskType() == hive { + assert.EqualValues(t, []string{"plugin b"}, override.GetPluginId()) } else { - t.Errorf("Unexpected task type [%s] plugin override committed to db", override.TaskType) + t.Errorf("Unexpected task type [%s] plugin override committed to db", override.GetTaskType()) } } createOrUpdateCalled = true @@ -763,16 +763,16 @@ func TestListAllResources(t *testing.T) { ResourceType: admin.MatchableResource_CLUSTER_RESOURCE, }) assert.Nil(t, err) - assert.NotNil(t, response.Configurations) - assert.Len(t, response.Configurations, 2) + assert.NotNil(t, 
response.GetConfigurations()) + assert.Len(t, response.GetConfigurations(), 2) assert.True(t, proto.Equal(&admin.MatchableAttributesConfiguration{ Project: "projectA", Attributes: &projectAttributes, - }, response.Configurations[0])) + }, response.GetConfigurations()[0])) assert.True(t, proto.Equal(&admin.MatchableAttributesConfiguration{ Project: "projectB", Domain: "development", Workflow: "workflow", Attributes: &workflowAttributes, - }, response.Configurations[1])) + }, response.GetConfigurations()[1])) } diff --git a/flyteadmin/pkg/manager/impl/signal_manager.go b/flyteadmin/pkg/manager/impl/signal_manager.go index f98edae674..7da9dd5f68 100644 --- a/flyteadmin/pkg/manager/impl/signal_manager.go +++ b/flyteadmin/pkg/manager/impl/signal_manager.go @@ -33,9 +33,9 @@ type SignalManager struct { } func getSignalContext(ctx context.Context, identifier *core.SignalIdentifier) context.Context { - ctx = contextutils.WithProjectDomain(ctx, identifier.ExecutionId.Project, identifier.ExecutionId.Domain) - ctx = contextutils.WithWorkflowID(ctx, identifier.ExecutionId.Name) - return contextutils.WithSignalID(ctx, identifier.SignalId) + ctx = contextutils.WithProjectDomain(ctx, identifier.GetExecutionId().GetProject(), identifier.GetExecutionId().GetDomain()) + ctx = contextutils.WithWorkflowID(ctx, identifier.GetExecutionId().GetName()) + return contextutils.WithSignalID(ctx, identifier.GetSignalId()) } func (s *SignalManager) GetOrCreateSignal(ctx context.Context, request *admin.SignalGetOrCreateRequest) (*admin.Signal, error) { @@ -43,11 +43,11 @@ func (s *SignalManager) GetOrCreateSignal(ctx context.Context, request *admin.Si logger.Debugf(ctx, "invalid request [%+v]: %v", request, err) return nil, err } - ctx = getSignalContext(ctx, request.Id) + ctx = getSignalContext(ctx, request.GetId()) - signalModel, err := transformers.CreateSignalModel(request.Id, request.Type, nil) + signalModel, err := transformers.CreateSignalModel(request.GetId(), request.GetType(), nil) if err != nil { - logger.Errorf(ctx, "Failed to transform signal with id [%+v] and type [+%v] with err: %v", request.Id, request.Type, err) + logger.Errorf(ctx, "Failed to transform signal with id [%+v] and type [+%v] with err: %v", request.GetId(), request.GetType(), err) return nil, err } @@ -70,33 +70,33 @@ func (s *SignalManager) ListSignals(ctx context.Context, request *admin.SignalLi logger.Debugf(ctx, "ListSignals request [%+v] is invalid: %v", request, err) return nil, err } - ctx = getExecutionContext(ctx, request.WorkflowExecutionId) + ctx = getExecutionContext(ctx, request.GetWorkflowExecutionId()) - identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.WorkflowExecutionId, common.Signal) + identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.GetWorkflowExecutionId(), common.Signal) if err != nil { return nil, err } - filters, err := util.AddRequestFilters(request.Filters, common.Signal, identifierFilters) + filters, err := util.AddRequestFilters(request.GetFilters(), common.Signal, identifierFilters) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.SignalColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.SignalColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s 
for ListSignals", request.Token) + "invalid pagination token %s for ListSignals", request.GetToken()) } signalModelList, err := s.db.SignalRepo().List(ctx, repoInterfaces.ListResourceInput{ InlineFilters: filters, Offset: offset, - Limit: int(request.Limit), + Limit: int(request.GetLimit()), SortParameter: sortParameter, }) if err != nil { @@ -111,7 +111,7 @@ func (s *SignalManager) ListSignals(ctx context.Context, request *admin.SignalLi return nil, err } var token string - if len(signalList) == int(request.Limit) { + if len(signalList) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(signalList)) } return &admin.SignalList{ @@ -124,11 +124,11 @@ func (s *SignalManager) SetSignal(ctx context.Context, request *admin.SignalSetR if err := validation.ValidateSignalSetRequest(ctx, s.db, request); err != nil { return nil, err } - ctx = getSignalContext(ctx, request.Id) + ctx = getSignalContext(ctx, request.GetId()) - signalModel, err := transformers.CreateSignalModel(request.Id, nil, request.Value) + signalModel, err := transformers.CreateSignalModel(request.GetId(), nil, request.GetValue()) if err != nil { - logger.Errorf(ctx, "Failed to transform signal with id [%+v] and value [+%v] with err: %v", request.Id, request.Value, err) + logger.Errorf(ctx, "Failed to transform signal with id [%+v] and value [+%v] with err: %v", request.GetId(), request.GetValue(), err) return nil, err } diff --git a/flyteadmin/pkg/manager/impl/task_execution_manager.go b/flyteadmin/pkg/manager/impl/task_execution_manager.go index f8b8e12e21..801d612ab2 100644 --- a/flyteadmin/pkg/manager/impl/task_execution_manager.go +++ b/flyteadmin/pkg/manager/impl/task_execution_manager.go @@ -56,15 +56,15 @@ type TaskExecutionManager struct { } func getTaskExecutionContext(ctx context.Context, identifier *core.TaskExecutionIdentifier) context.Context { - ctx = getNodeExecutionContext(ctx, identifier.NodeExecutionId) - return contextutils.WithTaskID(ctx, fmt.Sprintf("%s-%v", identifier.TaskId.Name, identifier.RetryAttempt)) + ctx = getNodeExecutionContext(ctx, identifier.GetNodeExecutionId()) + return contextutils.WithTaskID(ctx, fmt.Sprintf("%s-%v", identifier.GetTaskId().GetName(), identifier.GetRetryAttempt())) } func (m *TaskExecutionManager) createTaskExecution( ctx context.Context, request *admin.TaskExecutionEventRequest) ( models.TaskExecution, error) { - nodeExecutionID := request.Event.ParentNodeExecutionId + nodeExecutionID := request.GetEvent().GetParentNodeExecutionId() nodeExecutionExists, err := m.db.NodeExecutionRepo().Exists(ctx, repoInterfaces.NodeExecutionResource{ NodeExecutionIdentifier: nodeExecutionID, }) @@ -88,19 +88,19 @@ func (m *TaskExecutionManager) createTaskExecution( StorageClient: m.storageClient, }) if err != nil { - logger.Debugf(ctx, "failed to transform task execution %+v into database model: %v", request.Event.TaskId, err) + logger.Debugf(ctx, "failed to transform task execution %+v into database model: %v", request.GetEvent().GetTaskId(), err) return models.TaskExecution{}, err } if err := m.db.TaskExecutionRepo().Create(ctx, *taskExecutionModel); err != nil { logger.Debugf(ctx, "Failed to create task execution with task id [%+v] with err %v", - request.Event.TaskId, err) + request.GetEvent().GetTaskId(), err) return models.TaskExecution{}, err } m.metrics.TaskExecutionsCreated.Inc() m.metrics.ClosureSizeBytes.Observe(float64(len(taskExecutionModel.Closure))) - logger.Debugf(ctx, "created task execution: %+v", request.Event.TaskId) + logger.Debugf(ctx, "created task execution: 
%+v", request.GetEvent().GetTaskId()) return *taskExecutionModel, nil } @@ -111,14 +111,14 @@ func (m *TaskExecutionManager) updateTaskExecutionModelState( err := transformers.UpdateTaskExecutionModel(ctx, request, existingTaskExecution, m.config.ApplicationConfiguration().GetRemoteDataConfig().InlineEventDataPolicy, m.storageClient) if err != nil { - logger.Debugf(ctx, "failed to update task execution model [%+v] with err: %v", request.Event.TaskId, err) + logger.Debugf(ctx, "failed to update task execution model [%+v] with err: %v", request.GetEvent().GetTaskId(), err) return models.TaskExecution{}, err } err = m.db.TaskExecutionRepo().Update(ctx, *existingTaskExecution) if err != nil { logger.Debugf(ctx, "Failed to update task execution with task id [%+v] and task execution model [%+v] with err %v", - request.Event.TaskId, existingTaskExecution, err) + request.GetEvent().GetTaskId(), existingTaskExecution, err) return models.TaskExecution{}, err } @@ -132,20 +132,20 @@ func (m *TaskExecutionManager) CreateTaskExecutionEvent(ctx context.Context, req return nil, err } - if err := validation.ValidateClusterForExecutionID(ctx, m.db, request.Event.ParentNodeExecutionId.ExecutionId, request.Event.ProducerId); err != nil { + if err := validation.ValidateClusterForExecutionID(ctx, m.db, request.GetEvent().GetParentNodeExecutionId().GetExecutionId(), request.GetEvent().GetProducerId()); err != nil { return nil, err } // Get the parent node execution, if none found a MissingEntityError will be returned - nodeExecutionID := request.Event.ParentNodeExecutionId + nodeExecutionID := request.GetEvent().GetParentNodeExecutionId() taskExecutionID := &core.TaskExecutionIdentifier{ - TaskId: request.Event.TaskId, + TaskId: request.GetEvent().GetTaskId(), NodeExecutionId: nodeExecutionID, - RetryAttempt: request.Event.RetryAttempt, + RetryAttempt: request.GetEvent().GetRetryAttempt(), } ctx = getTaskExecutionContext(ctx, taskExecutionID) logger.Debugf(ctx, "Received task execution event for [%+v] transitioning to phase [%v]", - taskExecutionID, request.Event.Phase) + taskExecutionID, request.GetEvent().GetPhase()) // See if the task execution exists // - if it does check if the new phase is applicable and then update @@ -166,20 +166,20 @@ func (m *TaskExecutionManager) CreateTaskExecutionEvent(ctx context.Context, req return &admin.TaskExecutionEventResponse{}, nil } - if taskExecutionModel.Phase == request.Event.Phase.String() && - taskExecutionModel.PhaseVersion >= request.Event.PhaseVersion { + if taskExecutionModel.Phase == request.GetEvent().GetPhase().String() && + taskExecutionModel.PhaseVersion >= request.GetEvent().GetPhaseVersion() { logger.Debugf(ctx, "have already recorded task execution phase %s (version: %d) for %v", - request.Event.Phase.String(), request.Event.PhaseVersion, taskExecutionID) + request.GetEvent().GetPhase().String(), request.GetEvent().GetPhaseVersion(), taskExecutionID) return nil, errors.NewFlyteAdminErrorf(codes.AlreadyExists, "have already recorded task execution phase %s (version: %d) for %v", - request.Event.Phase.String(), request.Event.PhaseVersion, taskExecutionID) + request.GetEvent().GetPhase().String(), request.GetEvent().GetPhaseVersion(), taskExecutionID) } currentPhase := core.TaskExecution_Phase(core.TaskExecution_Phase_value[taskExecutionModel.Phase]) if common.IsTaskExecutionTerminal(currentPhase) { // Cannot update a terminal execution. 
- curPhase := request.Event.Phase.String() - errorMsg := fmt.Sprintf("invalid phase change from %v to %v for task execution %v", taskExecutionModel.Phase, request.Event.Phase, taskExecutionID) + curPhase := request.GetEvent().GetPhase().String() + errorMsg := fmt.Sprintf("invalid phase change from %v to %v for task execution %v", taskExecutionModel.Phase, request.GetEvent().GetPhase(), taskExecutionID) logger.Warnf(ctx, errorMsg) return nil, errors.NewAlreadyInTerminalStateError(ctx, errorMsg, curPhase) } @@ -191,49 +191,49 @@ func (m *TaskExecutionManager) CreateTaskExecutionEvent(ctx context.Context, req return nil, err } - if request.Event.Phase == core.TaskExecution_RUNNING && request.Event.PhaseVersion == 0 { // TODO: need to be careful about missing inc/decs + if request.GetEvent().GetPhase() == core.TaskExecution_RUNNING && request.GetEvent().GetPhaseVersion() == 0 { // TODO: need to be careful about missing inc/decs m.metrics.ActiveTaskExecutions.Inc() - } else if common.IsTaskExecutionTerminal(request.Event.Phase) && request.Event.PhaseVersion == 0 { + } else if common.IsTaskExecutionTerminal(request.GetEvent().GetPhase()) && request.GetEvent().GetPhaseVersion() == 0 { m.metrics.ActiveTaskExecutions.Dec() - m.metrics.TaskExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.Event.Phase.String())) - if request.Event.GetOutputData() != nil { - m.metrics.TaskExecutionOutputBytes.Observe(float64(proto.Size(request.Event.GetOutputData()))) + m.metrics.TaskExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.GetEvent().GetPhase().String())) + if request.GetEvent().GetOutputData() != nil { + m.metrics.TaskExecutionOutputBytes.Observe(float64(proto.Size(request.GetEvent().GetOutputData()))) } } if err = m.notificationClient.Publish(ctx, proto.MessageName(request), request); err != nil { m.metrics.PublishEventError.Inc() - logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.RequestId, err) + logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.GetRequestId(), err) } go func() { ceCtx := context.TODO() if err := m.cloudEventsPublisher.Publish(ceCtx, proto.MessageName(request), request); err != nil { - logger.Errorf(ctx, "error publishing cloud event [%+v] with err: [%v]", request.RequestId, err) + logger.Errorf(ctx, "error publishing cloud event [%+v] with err: [%v]", request.GetRequestId(), err) } }() m.metrics.TaskExecutionEventsCreated.Inc() - logger.Debugf(ctx, "Successfully recorded task execution event [%v]", request.Event) + logger.Debugf(ctx, "Successfully recorded task execution event [%v]", request.GetEvent()) // TODO: we will want to return some scope information here soon! 
return &admin.TaskExecutionEventResponse{}, nil } func (m *TaskExecutionManager) GetTaskExecution( ctx context.Context, request *admin.TaskExecutionGetRequest) (*admin.TaskExecution, error) { - err := validation.ValidateTaskExecutionIdentifier(request.Id) + err := validation.ValidateTaskExecutionIdentifier(request.GetId()) if err != nil { - logger.Debugf(ctx, "Failed to validate GetTaskExecution [%+v] with err: %v", request.Id, err) + logger.Debugf(ctx, "Failed to validate GetTaskExecution [%+v] with err: %v", request.GetId(), err) return nil, err } - ctx = getTaskExecutionContext(ctx, request.Id) - taskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.Id) + ctx = getTaskExecutionContext(ctx, request.GetId()) + taskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.GetId()) if err != nil { return nil, err } taskExecution, err := transformers.FromTaskExecutionModel(*taskExecutionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { - logger.Debugf(ctx, "Failed to transform task execution model [%+v] to proto: %v", request.Id, err) + logger.Debugf(ctx, "Failed to transform task execution model [%+v] to proto: %v", request.GetId(), err) return nil, err } return taskExecution, nil @@ -245,27 +245,27 @@ func (m *TaskExecutionManager) ListTaskExecutions( logger.Debugf(ctx, "ListTaskExecutions request [%+v] is invalid: %v", request, err) return nil, err } - ctx = getNodeExecutionContext(ctx, request.NodeExecutionId) + ctx = getNodeExecutionContext(ctx, request.GetNodeExecutionId()) - identifierFilters, err := util.GetNodeExecutionIdentifierFilters(ctx, request.NodeExecutionId, common.TaskExecution) + identifierFilters, err := util.GetNodeExecutionIdentifierFilters(ctx, request.GetNodeExecutionId(), common.TaskExecution) if err != nil { return nil, err } - filters, err := util.AddRequestFilters(request.Filters, common.TaskExecution, identifierFilters) + filters, err := util.AddRequestFilters(request.GetFilters(), common.TaskExecution, identifierFilters) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.TaskExecutionColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.TaskExecutionColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListTaskExecutions", request.Token) + "invalid pagination token %s for ListTaskExecutions", request.GetToken()) } joinTableEntities := make(map[common.Entity]bool) for _, filter := range filters { @@ -275,7 +275,7 @@ func (m *TaskExecutionManager) ListTaskExecutions( output, err := m.db.TaskExecutionRepo().List(ctx, repoInterfaces.ListResourceInput{ InlineFilters: filters, Offset: offset, - Limit: int(request.Limit), + Limit: int(request.GetLimit()), SortParameter: sortParameter, JoinTableEntities: joinTableEntities, }) @@ -292,7 +292,7 @@ func (m *TaskExecutionManager) ListTaskExecutions( return nil, err } var token string - if len(taskExecutionList) == int(request.Limit) { + if len(taskExecutionList) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(taskExecutionList)) } return &admin.TaskExecutionList{ @@ -303,16 +303,16 @@ func (m *TaskExecutionManager) ListTaskExecutions( func (m *TaskExecutionManager) GetTaskExecutionData( ctx context.Context, request *admin.TaskExecutionGetDataRequest) 
(*admin.TaskExecutionGetDataResponse, error) { - if err := validation.ValidateTaskExecutionIdentifier(request.Id); err != nil { - logger.Debugf(ctx, "Invalid identifier [%+v]: %v", request.Id, err) + if err := validation.ValidateTaskExecutionIdentifier(request.GetId()); err != nil { + logger.Debugf(ctx, "Invalid identifier [%+v]: %v", request.GetId(), err) } - ctx = getTaskExecutionContext(ctx, request.Id) + ctx = getTaskExecutionContext(ctx, request.GetId()) taskExecution, err := m.GetTaskExecution(ctx, &admin.TaskExecutionGetRequest{ - Id: request.Id, + Id: request.GetId(), }) if err != nil { logger.Debugf(ctx, "Failed to get task execution with id [%+v] with err %v", - request.Id, err) + request.GetId(), err) return nil, err } @@ -322,7 +322,7 @@ func (m *TaskExecutionManager) GetTaskExecutionData( group.Go(func() error { var err error inputs, inputURLBlob, err = util.GetInputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, taskExecution.InputUri) + m.storageClient, taskExecution.GetInputUri()) return err }) @@ -331,7 +331,7 @@ func (m *TaskExecutionManager) GetTaskExecutionData( group.Go(func() error { var err error outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, taskExecution.Closure) + m.storageClient, taskExecution.GetClosure()) return err }) @@ -345,14 +345,14 @@ func (m *TaskExecutionManager) GetTaskExecutionData( Outputs: outputURLBlob, FullInputs: inputs, FullOutputs: outputs, - FlyteUrls: common.FlyteURLsFromTaskExecutionID(request.Id, false), + FlyteUrls: common.FlyteURLsFromTaskExecutionID(request.GetId(), false), } - m.metrics.TaskExecutionInputBytes.Observe(float64(response.Inputs.Bytes)) - if response.Outputs.Bytes > 0 { - m.metrics.TaskExecutionOutputBytes.Observe(float64(response.Outputs.Bytes)) - } else if response.FullOutputs != nil { - m.metrics.TaskExecutionOutputBytes.Observe(float64(proto.Size(response.FullOutputs))) + m.metrics.TaskExecutionInputBytes.Observe(float64(response.GetInputs().GetBytes())) + if response.GetOutputs().GetBytes() > 0 { + m.metrics.TaskExecutionOutputBytes.Observe(float64(response.GetOutputs().GetBytes())) + } else if response.GetFullOutputs() != nil { + m.metrics.TaskExecutionOutputBytes.Observe(float64(proto.Size(response.GetFullOutputs()))) } return response, nil } diff --git a/flyteadmin/pkg/manager/impl/task_execution_manager_test.go b/flyteadmin/pkg/manager/impl/task_execution_manager_test.go index 7e2a14131e..939086d63d 100644 --- a/flyteadmin/pkg/manager/impl/task_execution_manager_test.go +++ b/flyteadmin/pkg/manager/impl/task_execution_manager_test.go @@ -72,9 +72,9 @@ func addGetWorkflowExecutionCallback(repository interfaces.Repository) { func(ctx context.Context, input interfaces.Identifier) (models.Execution, error) { return models.Execution{ ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, Cluster: "propeller", }, nil @@ -88,11 +88,11 @@ func addGetNodeExecutionCallback(repository interfaces.Repository) { func(ctx context.Context, input interfaces.NodeExecutionResource) (models.NodeExecution, error) { return models.NodeExecution{ NodeExecutionKey: models.NodeExecutionKey{ - NodeID: 
sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, }, nil @@ -105,10 +105,10 @@ func addGetTaskCallback(repository interfaces.Repository) { func(input interfaces.Identifier) (models.Task, error) { return models.Task{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, }, nil }, @@ -126,15 +126,15 @@ func TestCreateTaskEvent(t *testing.T) { repository.TaskExecutionRepo().(*repositoryMocks.MockTaskExecutionRepo).SetGetCallback( func(ctx context.Context, input interfaces.GetTaskExecutionInput) (models.TaskExecution, error) { getTaskCalled = true - assert.Equal(t, core.ResourceType_TASK, input.TaskExecutionID.TaskId.ResourceType) - assert.Equal(t, "task-id", input.TaskExecutionID.TaskId.Name) - assert.Equal(t, "project", input.TaskExecutionID.TaskId.Project) - assert.Equal(t, "domain", input.TaskExecutionID.TaskId.Domain) - assert.Equal(t, "task-v", input.TaskExecutionID.TaskId.Version) - assert.Equal(t, "node-id", input.TaskExecutionID.NodeExecutionId.NodeId) - assert.Equal(t, "project", input.TaskExecutionID.NodeExecutionId.ExecutionId.Project) - assert.Equal(t, "domain", input.TaskExecutionID.NodeExecutionId.ExecutionId.Domain) - assert.Equal(t, "name", input.TaskExecutionID.NodeExecutionId.ExecutionId.Name) + assert.Equal(t, core.ResourceType_TASK, input.TaskExecutionID.GetTaskId().GetResourceType()) + assert.Equal(t, "task-id", input.TaskExecutionID.GetTaskId().GetName()) + assert.Equal(t, "project", input.TaskExecutionID.GetTaskId().GetProject()) + assert.Equal(t, "domain", input.TaskExecutionID.GetTaskId().GetDomain()) + assert.Equal(t, "task-v", input.TaskExecutionID.GetTaskId().GetVersion()) + assert.Equal(t, "node-id", input.TaskExecutionID.GetNodeExecutionId().GetNodeId()) + assert.Equal(t, "project", input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject()) + assert.Equal(t, "domain", input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain()) + assert.Equal(t, "name", input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetName()) return models.TaskExecution{}, flyteAdminErrors.NewFlyteAdminError(codes.NotFound, "foo") }) @@ -153,17 +153,17 @@ func TestCreateTaskEvent(t *testing.T) { assert.Equal(t, models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: 
sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -219,17 +219,17 @@ func TestCreateTaskEvent_Update(t *testing.T) { return models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, }, @@ -266,17 +266,17 @@ func TestCreateTaskEvent_Update(t *testing.T) { assert.EqualValues(t, models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, }, @@ -368,17 +368,17 @@ func TestCreateTaskEvent_UpdateDatabaseError(t *testing.T) { return models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -407,17 +407,17 @@ func TestCreateTaskEvent_UpdateTerminalEventError(t *testing.T) { return models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: 
sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -458,17 +458,17 @@ func TestCreateTaskEvent_PhaseVersionChange(t *testing.T) { return models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, }, @@ -526,23 +526,23 @@ func TestGetTaskExecution(t *testing.T) { repository.TaskExecutionRepo().(*repositoryMocks.MockTaskExecutionRepo).SetGetCallback( func(ctx context.Context, input interfaces.GetTaskExecutionInput) (models.TaskExecution, error) { getTaskCalled = true - assert.Equal(t, sampleTaskID, input.TaskExecutionID.TaskId) - assert.Equal(t, sampleNodeExecID, input.TaskExecutionID.NodeExecutionId) - assert.Equal(t, uint32(1), input.TaskExecutionID.RetryAttempt) + assert.Equal(t, sampleTaskID, input.TaskExecutionID.GetTaskId()) + assert.Equal(t, sampleNodeExecID, input.TaskExecutionID.GetNodeExecutionId()) + assert.Equal(t, uint32(1), input.TaskExecutionID.GetRetryAttempt()) return models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -581,17 +581,17 @@ func TestGetTaskExecution_TransformerError(t *testing.T) { return models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: 
sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -752,7 +752,7 @@ func TestListTaskExecutions(t *testing.T) { }, InputUri: "input-uri.pb", Closure: expectedClosure, - }, taskExecutions.TaskExecutions[0])) + }, taskExecutions.GetTaskExecutions()[0])) assert.True(t, proto.Equal(&admin.TaskExecution{ Id: &core.TaskExecutionIdentifier{ RetryAttempt: secondRetryAttempt, @@ -774,7 +774,7 @@ func TestListTaskExecutions(t *testing.T) { }, InputUri: "input-uri2.pb", Closure: expectedClosure, - }, taskExecutions.TaskExecutions[1])) + }, taskExecutions.GetTaskExecutions()[1])) } func TestListTaskExecutions_Filters(t *testing.T) { @@ -925,7 +925,7 @@ func TestListTaskExecutions_Filters(t *testing.T) { }, InputUri: "input-uri.pb", Closure: expectedClosure, - }, taskExecutions.TaskExecutions[0])) + }, taskExecutions.GetTaskExecutions()[0])) assert.True(t, proto.Equal(&admin.TaskExecution{ Id: &core.TaskExecutionIdentifier{ RetryAttempt: secondRetryAttempt, @@ -947,7 +947,7 @@ func TestListTaskExecutions_Filters(t *testing.T) { }, InputUri: "input-uri2.pb", Closure: expectedClosure, - }, taskExecutions.TaskExecutions[1])) + }, taskExecutions.GetTaskExecutions()[1])) } func TestListTaskExecutions_NoFilters(t *testing.T) { @@ -1049,17 +1049,17 @@ func TestGetTaskExecutionData(t *testing.T) { return models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, diff --git a/flyteadmin/pkg/manager/impl/task_manager.go b/flyteadmin/pkg/manager/impl/task_manager.go index 157bcab5cd..7d903e98fb 100644 --- a/flyteadmin/pkg/manager/impl/task_manager.go +++ b/flyteadmin/pkg/manager/impl/task_manager.go @@ -44,44 +44,44 @@ type TaskManager struct { } func getTaskContext(ctx context.Context, identifier *core.Identifier) context.Context { - ctx = contextutils.WithProjectDomain(ctx, identifier.Project, identifier.Domain) - return contextutils.WithTaskID(ctx, identifier.Name) + ctx = contextutils.WithProjectDomain(ctx, identifier.GetProject(), identifier.GetDomain()) + return contextutils.WithTaskID(ctx, identifier.GetName()) } func setDefaults(request *admin.TaskCreateRequest) (*admin.TaskCreateRequest, error) { - if request.Id == nil { + if request.GetId() == nil { return request, errors.NewFlyteAdminError(codes.InvalidArgument, "missing identifier for TaskCreateRequest") } - request.Spec.Template.Id = request.Id + request.Spec.Template.Id = request.GetId() return request, nil } func (t *TaskManager) CreateTask( ctx context.Context, request *admin.TaskCreateRequest) (*admin.TaskCreateResponse, error) { - platformTaskResources := 
util.GetTaskResources(ctx, request.Id, t.resourceManager, t.config.TaskResourceConfiguration()) + platformTaskResources := util.GetTaskResources(ctx, request.GetId(), t.resourceManager, t.config.TaskResourceConfiguration()) if err := validation.ValidateTask(ctx, request, t.db, platformTaskResources, t.config.WhitelistConfiguration(), t.config.ApplicationConfiguration()); err != nil { - logger.Debugf(ctx, "Task [%+v] failed validation with err: %v", request.Id, err) + logger.Debugf(ctx, "Task [%+v] failed validation with err: %v", request.GetId(), err) return nil, err } - ctx = getTaskContext(ctx, request.Id) + ctx = getTaskContext(ctx, request.GetId()) finalizedRequest, err := setDefaults(request) if err != nil { return nil, err } // Compile task and store the compiled version in the database. - compiledTask, err := t.compiler.CompileTask(finalizedRequest.Spec.Template) + compiledTask, err := t.compiler.CompileTask(finalizedRequest.GetSpec().GetTemplate()) if err != nil { - logger.Debugf(ctx, "Failed to compile task with id [%+v] with err %v", request.Id, err) + logger.Debugf(ctx, "Failed to compile task with id [%+v] with err %v", request.GetId(), err) return nil, err } createdAt, err := ptypes.TimestampProto(time.Now()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, - "Failed to serialize CreatedAt: %v when creating task: %+v", err, request.Id) + "Failed to serialize CreatedAt: %v when creating task: %+v", err, request.GetId()) } taskDigest, err := util.GetTaskDigest(ctx, compiledTask) if err != nil { @@ -89,7 +89,7 @@ func (t *TaskManager) CreateTask( return nil, err } // See if a task exists and confirm whether it's an identical task or one that with a separate definition. - existingTaskModel, err := util.GetTaskModel(ctx, t.db, request.Spec.Template.Id) + existingTaskModel, err := util.GetTaskModel(ctx, t.db, request.GetSpec().GetTemplate().GetId()) if err == nil { if bytes.Equal(taskDigest, existingTaskModel.Digest) { return nil, errors.NewTaskExistsIdenticalStructureError(ctx, request) @@ -99,7 +99,7 @@ func (t *TaskManager) CreateTask( logger.Errorf(ctx, "failed to transform task from task model") return nil, transformerErr } - return nil, errors.NewTaskExistsDifferentStructureError(ctx, request, existingTask.Closure.GetCompiledTask(), compiledTask) + return nil, errors.NewTaskExistsDifferentStructureError(ctx, request, existingTask.GetClosure().GetCompiledTask(), compiledTask) } taskModel, err := transformers.CreateTaskModel(finalizedRequest, &admin.TaskClosure{ CompiledTask: compiledTask, @@ -111,10 +111,10 @@ func (t *TaskManager) CreateTask( return nil, err } - descriptionModel, err := transformers.CreateDescriptionEntityModel(request.Spec.Description, request.Id) + descriptionModel, err := transformers.CreateDescriptionEntityModel(request.GetSpec().GetDescription(), request.GetId()) if err != nil { logger.Errorf(ctx, - "Failed to transform description model [%+v] with err: %v", request.Spec.Description, err) + "Failed to transform description model [%+v] with err: %v", request.GetSpec().GetDescription(), err) return nil, err } if descriptionModel != nil { @@ -122,15 +122,15 @@ func (t *TaskManager) CreateTask( } err = t.db.TaskRepo().Create(ctx, taskModel, descriptionModel) if err != nil { - logger.Debugf(ctx, "Failed to create task model with id [%+v] with err %v", request.Id, err) + logger.Debugf(ctx, "Failed to create task model with id [%+v] with err %v", request.GetId(), err) return nil, err } 
t.metrics.ClosureSizeBytes.Observe(float64(len(taskModel.Closure))) - if finalizedRequest.Spec.Template.Metadata != nil { + if finalizedRequest.GetSpec().GetTemplate().GetMetadata() != nil { contextWithRuntimeMeta := context.WithValue( - ctx, common.RuntimeTypeKey, finalizedRequest.Spec.Template.Metadata.Runtime.Type.String()) + ctx, common.RuntimeTypeKey, finalizedRequest.GetSpec().GetTemplate().GetMetadata().GetRuntime().GetType().String()) contextWithRuntimeMeta = context.WithValue( - contextWithRuntimeMeta, common.RuntimeVersionKey, finalizedRequest.Spec.Template.Metadata.Runtime.Version) + contextWithRuntimeMeta, common.RuntimeVersionKey, finalizedRequest.GetSpec().GetTemplate().GetMetadata().GetRuntime().GetVersion()) t.metrics.Registered.Inc(contextWithRuntimeMeta) } @@ -138,13 +138,13 @@ } func (t *TaskManager) GetTask(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Task, error) { - if err := validation.ValidateIdentifier(request.Id, common.Task); err != nil { - logger.Debugf(ctx, "invalid identifier [%+v]: %v", request.Id, err) + if err := validation.ValidateIdentifier(request.GetId(), common.Task); err != nil { + logger.Debugf(ctx, "invalid identifier [%+v]: %v", request.GetId(), err) } - ctx = getTaskContext(ctx, request.Id) - task, err := util.GetTask(ctx, t.db, request.Id) + ctx = getTaskContext(ctx, request.GetId()) + task, err := util.GetTask(ctx, t.db, request.GetId()) if err != nil { - logger.Debugf(ctx, "Failed to get task with id [%+v] with err %v", err, request.Id) + logger.Debugf(ctx, "Failed to get task with id [%+v] with err %v", request.GetId(), err) return nil, err } return task, nil @@ -156,13 +156,13 @@ func (t *TaskManager) ListTasks(ctx context.Context, request *admin.ResourceList logger.Debugf(ctx, "Invalid request [%+v]: %v", request, err) return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) - ctx = contextutils.WithTaskID(ctx, request.Id.Name) + ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain()) + ctx = contextutils.WithTaskID(ctx, request.GetId().GetName()) spec := util.FilterSpec{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, - RequestFilters: request.Filters, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), + RequestFilters: request.GetFilters(), } filters, err := util.GetDbFilters(spec, common.Task) @@ -170,26 +170,26 @@ return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.TaskColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.TaskColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListTasks", request.Token) + "invalid pagination token %s for ListTasks", request.GetToken()) } // And finally, query the database listTasksInput := repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, } output, err := t.db.TaskRepo().List(ctx, listTasksInput) if err != nil { - logger.Debugf(ctx, "Failed to list tasks with id [%+v] with err %v",
request.Id, err) + logger.Debugf(ctx, "Failed to list tasks with id [%+v] with err %v", request.GetId(), err) return nil, err } taskList, err := transformers.FromTaskModels(output.Tasks) @@ -200,7 +200,7 @@ func (t *TaskManager) ListTasks(ctx context.Context, request *admin.ResourceList } var token string - if len(taskList) == int(request.Limit) { + if len(taskList) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(taskList)) } return &admin.TaskList{ @@ -217,27 +217,27 @@ func (t *TaskManager) ListUniqueTaskIdentifiers(ctx context.Context, request *ad logger.Debugf(ctx, "invalid request [%+v]: %v", request, err) return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain()) filters, err := util.GetDbFilters(util.FilterSpec{ - Project: request.Project, - Domain: request.Domain, + Project: request.GetProject(), + Domain: request.GetDomain(), }, common.Task) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.TaskColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.TaskColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListUniqueTaskIdentifiers", request.Token) + "invalid pagination token %s for ListUniqueTaskIdentifiers", request.GetToken()) } listTasksInput := repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, @@ -246,13 +246,13 @@ func (t *TaskManager) ListUniqueTaskIdentifiers(ctx context.Context, request *ad output, err := t.db.TaskRepo().ListTaskIdentifiers(ctx, listTasksInput) if err != nil { logger.Debugf(ctx, "Failed to list tasks ids with project: %s and domain: %s with err %v", - request.Project, request.Domain, err) + request.GetProject(), request.GetDomain(), err) return nil, err } idList := transformers.FromTaskModelsToIdentifiers(output.Tasks) var token string - if len(idList) == int(request.Limit) { + if len(idList) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(idList)) } return &admin.NamedEntityIdentifierList{ diff --git a/flyteadmin/pkg/manager/impl/task_manager_test.go b/flyteadmin/pkg/manager/impl/task_manager_test.go index 4308fc2626..1301444ceb 100644 --- a/flyteadmin/pkg/manager/impl/task_manager_test.go +++ b/flyteadmin/pkg/manager/impl/task_manager_test.go @@ -172,11 +172,11 @@ func TestGetTask(t *testing.T) { Id: &taskIdentifier, }) assert.NoError(t, err) - assert.Equal(t, "project", task.Id.Project) - assert.Equal(t, "domain", task.Id.Domain) - assert.Equal(t, "name", task.Id.Name) - assert.Equal(t, "version", task.Id.Version) - assert.True(t, proto.Equal(testutils.GetTaskClosure(), task.Closure)) + assert.Equal(t, "project", task.GetId().GetProject()) + assert.Equal(t, "domain", task.GetId().GetDomain()) + assert.Equal(t, "name", task.GetId().GetName()) + assert.Equal(t, "version", task.GetId().GetVersion()) + assert.True(t, proto.Equal(testutils.GetTaskClosure(), task.GetClosure())) } func TestGetTask_DatabaseError(t *testing.T) { @@ -287,18 +287,18 @@ func TestListTasks(t *testing.T) { }) assert.NoError(t, err) assert.NotNil(t, taskList) - assert.Len(t, taskList.Tasks, 2) + assert.Len(t, 
taskList.GetTasks(), 2) - for idx, task := range taskList.Tasks { - assert.Equal(t, projectValue, task.Id.Project) - assert.Equal(t, domainValue, task.Id.Domain) - assert.Equal(t, nameValue, task.Id.Name) - assert.Equal(t, fmt.Sprintf("version %v", idx), task.Id.Version) + for idx, task := range taskList.GetTasks() { + assert.Equal(t, projectValue, task.GetId().GetProject()) + assert.Equal(t, domainValue, task.GetId().GetDomain()) + assert.Equal(t, nameValue, task.GetId().GetName()) + assert.Equal(t, fmt.Sprintf("version %v", idx), task.GetId().GetVersion()) assert.True(t, proto.Equal(&admin.TaskClosure{ CreatedAt: testutils.MockCreatedAtProto, - }, task.Closure)) + }, task.GetClosure())) } - assert.Equal(t, "2", taskList.Token) + assert.Equal(t, "2", taskList.GetToken()) } func TestListTasks_MissingParameters(t *testing.T) { @@ -401,6 +401,6 @@ func TestListUniqueTaskIdentifiers(t *testing.T) { }) assert.NoError(t, err) - assert.Equal(t, 2, len(resp.Entities)) - assert.Empty(t, resp.Token) + assert.Equal(t, 2, len(resp.GetEntities())) + assert.Empty(t, resp.GetToken()) } diff --git a/flyteadmin/pkg/manager/impl/testutils/mock_closures.go b/flyteadmin/pkg/manager/impl/testutils/mock_closures.go index 6554bd6403..945310daf1 100644 --- a/flyteadmin/pkg/manager/impl/testutils/mock_closures.go +++ b/flyteadmin/pkg/manager/impl/testutils/mock_closures.go @@ -17,7 +17,7 @@ var MockCreatedAtProto, _ = ptypes.TimestampProto(MockCreatedAtValue) func GetTaskClosure() *admin.TaskClosure { return &admin.TaskClosure{ CompiledTask: &core.CompiledTask{ - Template: GetValidTaskRequest().Spec.Template, + Template: GetValidTaskRequest().GetSpec().GetTemplate(), }, CreatedAt: MockCreatedAtProto, } @@ -32,11 +32,11 @@ func GetWorkflowClosure() *admin.WorkflowClosure { return &admin.WorkflowClosure{ CompiledWorkflow: &core.CompiledWorkflowClosure{ Primary: &core.CompiledWorkflow{ - Template: GetWorkflowRequest().Spec.Template, + Template: GetWorkflowRequest().GetSpec().GetTemplate(), }, Tasks: []*core.CompiledTask{ { - Template: GetValidTaskRequest().Spec.Template, + Template: GetValidTaskRequest().GetSpec().GetTemplate(), }, }, }, diff --git a/flyteadmin/pkg/manager/impl/testutils/mock_requests.go b/flyteadmin/pkg/manager/impl/testutils/mock_requests.go index b3d01897f1..64ab792220 100644 --- a/flyteadmin/pkg/manager/impl/testutils/mock_requests.go +++ b/flyteadmin/pkg/manager/impl/testutils/mock_requests.go @@ -328,10 +328,10 @@ func GetSampleLpSpecForTest() *admin.LaunchPlanSpec { } func GetWorkflowRequestInterfaceBytes() []byte { - bytes, _ := proto.Marshal(GetWorkflowRequest().Spec.Template.Interface) + bytes, _ := proto.Marshal(GetWorkflowRequest().GetSpec().GetTemplate().GetInterface()) return bytes } func GetWorkflowRequestInterface() *core.TypedInterface { - return GetWorkflowRequest().Spec.Template.Interface + return GetWorkflowRequest().GetSpec().GetTemplate().GetInterface() } diff --git a/flyteadmin/pkg/manager/impl/util/data.go b/flyteadmin/pkg/manager/impl/util/data.go index d6fe14af2e..1827cfd167 100644 --- a/flyteadmin/pkg/manager/impl/util/data.go +++ b/flyteadmin/pkg/manager/impl/util/data.go @@ -21,7 +21,7 @@ const ( func shouldFetchData(config *runtimeInterfaces.RemoteDataConfig, urlBlob *admin.UrlBlob) bool { return config.Scheme == common.Local || config.Scheme == common.None || config.MaxSizeInBytes == 0 || - urlBlob.Bytes < config.MaxSizeInBytes + urlBlob.GetBytes() < config.MaxSizeInBytes } func shouldFetchOutputData(config *runtimeInterfaces.RemoteDataConfig, urlBlob *admin.UrlBlob, 
outputURI string) bool { diff --git a/flyteadmin/pkg/manager/impl/util/digests.go b/flyteadmin/pkg/manager/impl/util/digests.go index 2846490f71..6fd31273c2 100644 --- a/flyteadmin/pkg/manager/impl/util/digests.go +++ b/flyteadmin/pkg/manager/impl/util/digests.go @@ -17,9 +17,9 @@ func GetLaunchPlanDigest(ctx context.Context, launchPlan *admin.LaunchPlan) ([]b launchPlanDigest, err := pbhash.ComputeHash(ctx, launchPlan) if err != nil { logger.Warningf(ctx, "failed to hash launch plan [%+v] to digest with err %v", - launchPlan.Id, err) + launchPlan.GetId(), err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, - "failed to hash launch plan [%+v] to digest with err %v", launchPlan.Id, err) + "failed to hash launch plan [%+v] to digest with err %v", launchPlan.GetId(), err) } return launchPlanDigest, nil @@ -30,9 +30,9 @@ func GetTaskDigest(ctx context.Context, task *core.CompiledTask) ([]byte, error) taskDigest, err := pbhash.ComputeHash(ctx, task) if err != nil { logger.Warningf(ctx, "failed to hash task [%+v] to digest with err %v", - task.Template.Id, err) + task.GetTemplate().GetId(), err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, - "failed to hash task [%+v] to digest with err %v", task.Template.Id, err) + "failed to hash task [%+v] to digest with err %v", task.GetTemplate().GetId(), err) } return taskDigest, nil @@ -43,9 +43,9 @@ func GetWorkflowDigest(ctx context.Context, workflowClosure *core.CompiledWorkfl workflowDigest, err := pbhash.ComputeHash(ctx, workflowClosure) if err != nil { logger.Warningf(ctx, "failed to hash workflow [%+v] to digest with err %v", - workflowClosure.Primary.Template.Id, err) + workflowClosure.GetPrimary().GetTemplate().GetId(), err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, - "failed to hash workflow [%+v] to digest with err %v", workflowClosure.Primary.Template.Id, err) + "failed to hash workflow [%+v] to digest with err %v", workflowClosure.GetPrimary().GetTemplate().GetId(), err) } return workflowDigest, nil diff --git a/flyteadmin/pkg/manager/impl/util/digests_test.go b/flyteadmin/pkg/manager/impl/util/digests_test.go index ee3ea93d19..870fbd4cbd 100644 --- a/flyteadmin/pkg/manager/impl/util/digests_test.go +++ b/flyteadmin/pkg/manager/impl/util/digests_test.go @@ -149,7 +149,7 @@ func TestGetWorkflowDigest_Unequal(t *testing.T) { workflowWithDifferentNodes, err := getCompiledWorkflow() assert.Nil(t, err) workflowWithDifferentNodes.Primary.Template.Nodes = append( - workflowWithDifferentNodes.Primary.Template.Nodes, &core.Node{ + workflowWithDifferentNodes.GetPrimary().GetTemplate().GetNodes(), &core.Node{ Id: "unexpected", }) workflowDigest, err := GetWorkflowDigest(context.Background(), workflowWithDifferentNodes) diff --git a/flyteadmin/pkg/manager/impl/util/filters.go b/flyteadmin/pkg/manager/impl/util/filters.go index 377dcdab51..b6426a3852 100644 --- a/flyteadmin/pkg/manager/impl/util/filters.go +++ b/flyteadmin/pkg/manager/impl/util/filters.go @@ -274,28 +274,28 @@ func GetWorkflowExecutionIdentifierFilters( ctx context.Context, workflowExecutionIdentifier *core.WorkflowExecutionIdentifier, entity common.Entity) ([]common.InlineFilter, error) { identifierFilters := make([]common.InlineFilter, 3) identifierProjectFilter, err := GetSingleValueEqualityFilter( - entity, shared.Project, workflowExecutionIdentifier.Project) + entity, shared.Project, workflowExecutionIdentifier.GetProject()) if err != nil { logger.Warningf(ctx, "Failed to create execution identifier filter for project: %s with identifier [%+v]", - 
workflowExecutionIdentifier.Project, workflowExecutionIdentifier) + workflowExecutionIdentifier.GetProject(), workflowExecutionIdentifier) return nil, err } identifierFilters[0] = identifierProjectFilter identifierDomainFilter, err := GetSingleValueEqualityFilter( - entity, shared.Domain, workflowExecutionIdentifier.Domain) + entity, shared.Domain, workflowExecutionIdentifier.GetDomain()) if err != nil { logger.Warningf(ctx, "Failed to create execution identifier filter for domain: %s with identifier [%+v]", - workflowExecutionIdentifier.Domain, workflowExecutionIdentifier) + workflowExecutionIdentifier.GetDomain(), workflowExecutionIdentifier) return nil, err } identifierFilters[1] = identifierDomainFilter identifierNameFilter, err := GetSingleValueEqualityFilter( - entity, shared.Name, workflowExecutionIdentifier.Name) + entity, shared.Name, workflowExecutionIdentifier.GetName()) if err != nil { logger.Warningf(ctx, "Failed to create execution identifier filter for domain: %s with identifier [%+v]", - workflowExecutionIdentifier.Name, workflowExecutionIdentifier) + workflowExecutionIdentifier.GetName(), workflowExecutionIdentifier) return nil, err } identifierFilters[2] = identifierNameFilter @@ -306,15 +306,15 @@ func GetWorkflowExecutionIdentifierFilters( func GetNodeExecutionIdentifierFilters( ctx context.Context, nodeExecutionIdentifier *core.NodeExecutionIdentifier, entity common.Entity) ([]common.InlineFilter, error) { workflowExecutionIdentifierFilters, err := - GetWorkflowExecutionIdentifierFilters(ctx, nodeExecutionIdentifier.ExecutionId, entity) + GetWorkflowExecutionIdentifierFilters(ctx, nodeExecutionIdentifier.GetExecutionId(), entity) if err != nil { return nil, err } nodeIDFilter, err := GetSingleValueEqualityFilter( - entity, shared.NodeID, nodeExecutionIdentifier.NodeId) + entity, shared.NodeID, nodeExecutionIdentifier.GetNodeId()) if err != nil { logger.Warningf(ctx, "Failed to create node execution identifier filter for node id: %s with identifier [%+v]", - nodeExecutionIdentifier.NodeId, nodeExecutionIdentifier) + nodeExecutionIdentifier.GetNodeId(), nodeExecutionIdentifier) } return append(workflowExecutionIdentifierFilters, nodeIDFilter), nil } diff --git a/flyteadmin/pkg/manager/impl/util/resources.go b/flyteadmin/pkg/manager/impl/util/resources.go index 79aadb61b2..cd92bb671d 100644 --- a/flyteadmin/pkg/manager/impl/util/resources.go +++ b/flyteadmin/pkg/manager/impl/util/resources.go @@ -31,16 +31,16 @@ func getTaskResourcesAsSet(ctx context.Context, identifier *core.Identifier, result := runtimeInterfaces.TaskResourceSet{} for _, entry := range resourceEntries { - switch entry.Name { + switch entry.GetName() { case core.Resources_CPU: - result.CPU = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.cpu", resourceName), entry.Value) + result.CPU = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.cpu", resourceName), entry.GetValue()) case core.Resources_MEMORY: - result.Memory = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.memory", resourceName), entry.Value) + result.Memory = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.memory", resourceName), entry.GetValue()) case core.Resources_EPHEMERAL_STORAGE: result.EphemeralStorage = parseQuantityNoError(ctx, identifier.String(), - fmt.Sprintf("%v.ephemeral storage", resourceName), entry.Value) + fmt.Sprintf("%v.ephemeral storage", resourceName), entry.GetValue()) case core.Resources_GPU: - result.GPU = parseQuantityNoError(ctx, identifier.String(), "gpu", 
entry.Value) + result.GPU = parseQuantityNoError(ctx, identifier.String(), "gpu", entry.GetValue()) } } @@ -50,28 +50,28 @@ func getTaskResourcesAsSet(ctx context.Context, identifier *core.Identifier, // GetCompleteTaskResourceRequirements parses the resource requests and limits from the `TaskTemplate` Container. func GetCompleteTaskResourceRequirements(ctx context.Context, identifier *core.Identifier, task *core.CompiledTask) workflowengineInterfaces.TaskResources { return workflowengineInterfaces.TaskResources{ - Defaults: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().Resources.Requests, "requests"), - Limits: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().Resources.Limits, "limits"), + Defaults: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().GetResources().GetRequests(), "requests"), + Limits: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().GetResources().GetLimits(), "limits"), } } // fromAdminProtoTaskResourceSpec parses the flyteidl `TaskResourceSpec` message into a `TaskResourceSet`. func fromAdminProtoTaskResourceSpec(ctx context.Context, spec *admin.TaskResourceSpec) runtimeInterfaces.TaskResourceSet { result := runtimeInterfaces.TaskResourceSet{} - if len(spec.Cpu) > 0 { - result.CPU = parseQuantityNoError(ctx, "project", "cpu", spec.Cpu) + if len(spec.GetCpu()) > 0 { + result.CPU = parseQuantityNoError(ctx, "project", "cpu", spec.GetCpu()) } - if len(spec.Memory) > 0 { - result.Memory = parseQuantityNoError(ctx, "project", "memory", spec.Memory) + if len(spec.GetMemory()) > 0 { + result.Memory = parseQuantityNoError(ctx, "project", "memory", spec.GetMemory()) } - if len(spec.EphemeralStorage) > 0 { - result.EphemeralStorage = parseQuantityNoError(ctx, "project", "ephemeral storage", spec.EphemeralStorage) + if len(spec.GetEphemeralStorage()) > 0 { + result.EphemeralStorage = parseQuantityNoError(ctx, "project", "ephemeral storage", spec.GetEphemeralStorage()) } - if len(spec.Gpu) > 0 { - result.GPU = parseQuantityNoError(ctx, "project", "gpu", spec.Gpu) + if len(spec.GetGpu()) > 0 { + result.GPU = parseQuantityNoError(ctx, "project", "gpu", spec.GetGpu()) } return result @@ -86,14 +86,14 @@ func GetTaskResources(ctx context.Context, id *core.Identifier, resourceManager request := interfaces.ResourceRequest{ ResourceType: admin.MatchableResource_TASK_RESOURCE, } - if id != nil && len(id.Project) > 0 { - request.Project = id.Project + if id != nil && len(id.GetProject()) > 0 { + request.Project = id.GetProject() } - if id != nil && len(id.Domain) > 0 { - request.Domain = id.Domain + if id != nil && len(id.GetDomain()) > 0 { + request.Domain = id.GetDomain() } - if id != nil && id.ResourceType == core.ResourceType_WORKFLOW && len(id.Name) > 0 { - request.Workflow = id.Name + if id != nil && id.GetResourceType() == core.ResourceType_WORKFLOW && len(id.GetName()) > 0 { + request.Workflow = id.GetName() } resource, err := resourceManager.GetResource(ctx, request) @@ -105,8 +105,8 @@ func GetTaskResources(ctx context.Context, id *core.Identifier, resourceManager logger.Debugf(ctx, "Assigning task requested resources for [%+v]", id) var taskResourceAttributes = workflowengineInterfaces.TaskResources{} if resource != nil && resource.Attributes != nil && resource.Attributes.GetTaskResourceAttributes() != nil { - taskResourceAttributes.Defaults = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().Defaults) - taskResourceAttributes.Limits = 
fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().Limits) + taskResourceAttributes.Defaults = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().GetDefaults()) + taskResourceAttributes.Limits = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().GetLimits()) } else { taskResourceAttributes = workflowengineInterfaces.TaskResources{ Defaults: taskResourceConfig.GetDefaults(), diff --git a/flyteadmin/pkg/manager/impl/util/resources_test.go b/flyteadmin/pkg/manager/impl/util/resources_test.go index c163b44e0c..932792f307 100644 --- a/flyteadmin/pkg/manager/impl/util/resources_test.go +++ b/flyteadmin/pkg/manager/impl/util/resources_test.go @@ -44,9 +44,9 @@ func TestGetTaskResources(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.EqualValues(t, request, managerInterfaces.ResourceRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, - Workflow: workflowIdentifier.Name, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), + Workflow: workflowIdentifier.GetName(), ResourceType: admin.MatchableResource_TASK_RESOURCE, }) return &managerInterfaces.ResourceResponse{}, nil @@ -73,9 +73,9 @@ func TestGetTaskResources(t *testing.T) { resourceManager.GetResourceFunc = func(ctx context.Context, request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { assert.EqualValues(t, request, managerInterfaces.ResourceRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, - Workflow: workflowIdentifier.Name, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), + Workflow: workflowIdentifier.GetName(), ResourceType: admin.MatchableResource_TASK_RESOURCE, }) return &managerInterfaces.ResourceResponse{ diff --git a/flyteadmin/pkg/manager/impl/util/shared.go b/flyteadmin/pkg/manager/impl/util/shared.go index 8402451200..690ad32fa3 100644 --- a/flyteadmin/pkg/manager/impl/util/shared.go +++ b/flyteadmin/pkg/manager/impl/util/shared.go @@ -22,8 +22,8 @@ import ( ) func GetExecutionName(request *admin.ExecutionCreateRequest) string { - if request.Name != "" { - return request.Name + if request.GetName() != "" { + return request.GetName() } return common.GetExecutionName(time.Now().UnixNano()) } @@ -46,10 +46,10 @@ func GetTask(ctx context.Context, repo repoInterfaces.Repository, identifier *co func GetWorkflowModel( ctx context.Context, repo repoInterfaces.Repository, identifier *core.Identifier) (models.Workflow, error) { workflowModel, err := (repo).WorkflowRepo().Get(ctx, repoInterfaces.Identifier{ - Project: identifier.Project, - Domain: identifier.Domain, - Name: identifier.Name, - Version: identifier.Version, + Project: identifier.GetProject(), + Domain: identifier.GetDomain(), + Name: identifier.GetName(), + Version: identifier.GetVersion(), }) if err != nil { return models.Workflow{}, err @@ -87,7 +87,7 @@ func GetWorkflow( if err != nil { return nil, err } - closure.CreatedAt = workflow.Closure.CreatedAt + closure.CreatedAt = workflow.GetClosure().GetCreatedAt() workflow.Closure = closure return &workflow, nil } @@ -95,10 +95,10 @@ func GetWorkflow( func GetLaunchPlanModel( ctx context.Context, repo repoInterfaces.Repository, identifier *core.Identifier) (models.LaunchPlan, error) { launchPlanModel, err := (repo).LaunchPlanRepo().Get(ctx, 
repoInterfaces.Identifier{ - Project: identifier.Project, - Domain: identifier.Domain, - Name: identifier.Name, - Version: identifier.Version, + Project: identifier.GetProject(), + Domain: identifier.GetDomain(), + Name: identifier.GetName(), + Version: identifier.GetVersion(), }) if err != nil { return models.LaunchPlan{}, err @@ -119,9 +119,9 @@ func GetNamedEntityModel( ctx context.Context, repo repoInterfaces.Repository, resourceType core.ResourceType, identifier *admin.NamedEntityIdentifier) (models.NamedEntity, error) { metadataModel, err := (repo).NamedEntityRepo().Get(ctx, repoInterfaces.GetNamedEntityInput{ ResourceType: resourceType, - Project: identifier.Project, - Domain: identifier.Domain, - Name: identifier.Name, + Project: identifier.GetProject(), + Domain: identifier.GetDomain(), + Name: identifier.GetName(), }) if err != nil { return models.NamedEntity{}, err @@ -142,11 +142,11 @@ func GetNamedEntity( func GetDescriptionEntityModel( ctx context.Context, repo repoInterfaces.Repository, identifier *core.Identifier) (models.DescriptionEntity, error) { descriptionEntityModel, err := (repo).DescriptionEntityRepo().Get(ctx, repoInterfaces.GetDescriptionEntityInput{ - ResourceType: identifier.ResourceType, - Project: identifier.Project, - Domain: identifier.Domain, - Name: identifier.Name, - Version: identifier.Version, + ResourceType: identifier.GetResourceType(), + Project: identifier.GetProject(), + Domain: identifier.GetDomain(), + Name: identifier.GetName(), + Version: identifier.GetVersion(), }) if err != nil { return models.DescriptionEntity{}, err @@ -211,9 +211,9 @@ func GetExecutionModel( ctx context.Context, repo repoInterfaces.Repository, identifier *core.WorkflowExecutionIdentifier) ( *models.Execution, error) { executionModel, err := repo.ExecutionRepo().Get(ctx, repoInterfaces.Identifier{ - Project: identifier.Project, - Domain: identifier.Domain, - Name: identifier.Name, + Project: identifier.GetProject(), + Domain: identifier.GetDomain(), + Name: identifier.GetName(), }) if err != nil { return nil, err @@ -236,10 +236,10 @@ func GetNodeExecutionModel(ctx context.Context, repo repoInterfaces.Repository, func GetTaskModel(ctx context.Context, repo repoInterfaces.Repository, taskIdentifier *core.Identifier) ( *models.Task, error) { taskModel, err := repo.TaskRepo().Get(ctx, repoInterfaces.Identifier{ - Project: taskIdentifier.Project, - Domain: taskIdentifier.Domain, - Name: taskIdentifier.Name, - Version: taskIdentifier.Version, + Project: taskIdentifier.GetProject(), + Domain: taskIdentifier.GetDomain(), + Name: taskIdentifier.GetName(), + Version: taskIdentifier.GetVersion(), }) if err != nil { @@ -305,15 +305,15 @@ func MergeIntoExecConfig(workflowExecConfig *admin.WorkflowExecutionConfig, spec // Hence we do a deep check in the following conditions before assignment if (workflowExecConfig.GetRawOutputDataConfig() == nil || len(workflowExecConfig.GetRawOutputDataConfig().GetOutputLocationPrefix()) == 0) && - (spec.GetRawOutputDataConfig() != nil && len(spec.GetRawOutputDataConfig().OutputLocationPrefix) > 0) { + (spec.GetRawOutputDataConfig() != nil && len(spec.GetRawOutputDataConfig().GetOutputLocationPrefix()) > 0) { workflowExecConfig.RawOutputDataConfig = spec.GetRawOutputDataConfig() } - if (workflowExecConfig.GetLabels() == nil || len(workflowExecConfig.GetLabels().Values) == 0) && - (spec.GetLabels() != nil && len(spec.GetLabels().Values) > 0) { + if (workflowExecConfig.GetLabels() == nil || len(workflowExecConfig.GetLabels().GetValues()) == 0) && + 
(spec.GetLabels() != nil && len(spec.GetLabels().GetValues()) > 0) { workflowExecConfig.Labels = spec.GetLabels() } - if (workflowExecConfig.GetAnnotations() == nil || len(workflowExecConfig.GetAnnotations().Values) == 0) && - (spec.GetAnnotations() != nil && len(spec.GetAnnotations().Values) > 0) { + if (workflowExecConfig.GetAnnotations() == nil || len(workflowExecConfig.GetAnnotations().GetValues()) == 0) && + (spec.GetAnnotations() != nil && len(spec.GetAnnotations().GetValues()) > 0) { workflowExecConfig.Annotations = spec.GetAnnotations() } @@ -325,8 +325,8 @@ func MergeIntoExecConfig(workflowExecConfig *admin.WorkflowExecutionConfig, spec workflowExecConfig.OverwriteCache = spec.GetOverwriteCache() } - if (workflowExecConfig.GetEnvs() == nil || len(workflowExecConfig.GetEnvs().Values) == 0) && - (spec.GetEnvs() != nil && len(spec.GetEnvs().Values) > 0) { + if (workflowExecConfig.GetEnvs() == nil || len(workflowExecConfig.GetEnvs().GetValues()) == 0) && + (spec.GetEnvs() != nil && len(spec.GetEnvs().GetValues()) > 0) { workflowExecConfig.Envs = spec.GetEnvs() } diff --git a/flyteadmin/pkg/manager/impl/util/shared_test.go b/flyteadmin/pkg/manager/impl/util/shared_test.go index b9b296971e..09cb172638 100644 --- a/flyteadmin/pkg/manager/impl/util/shared_test.go +++ b/flyteadmin/pkg/manager/impl/util/shared_test.go @@ -81,10 +81,10 @@ func TestGetTask(t *testing.T) { }) assert.NoError(t, err) assert.NotNil(t, task) - assert.Equal(t, project, task.Id.Project) - assert.Equal(t, domain, task.Id.Domain) - assert.Equal(t, name, task.Id.Name) - assert.Equal(t, version, task.Id.Version) + assert.Equal(t, project, task.GetId().GetProject()) + assert.Equal(t, domain, task.GetId().GetDomain()) + assert.Equal(t, name, task.GetId().GetName()) + assert.Equal(t, version, task.GetId().GetVersion()) } func TestGetTask_DatabaseError(t *testing.T) { @@ -326,10 +326,10 @@ func TestGetLaunchPlan(t *testing.T) { }) assert.Nil(t, err) assert.NotNil(t, launchPlan) - assert.Equal(t, project, launchPlan.Id.Project) - assert.Equal(t, domain, launchPlan.Id.Domain) - assert.Equal(t, name, launchPlan.Id.Name) - assert.Equal(t, version, launchPlan.Id.Version) + assert.Equal(t, project, launchPlan.GetId().GetProject()) + assert.Equal(t, domain, launchPlan.GetId().GetDomain()) + assert.Equal(t, name, launchPlan.GetId().GetName()) + assert.Equal(t, version, launchPlan.GetId().GetVersion()) } func TestGetLaunchPlan_TransformerError(t *testing.T) { @@ -443,11 +443,11 @@ func TestGetNamedEntity(t *testing.T) { }) assert.Nil(t, err) assert.NotNil(t, entity) - assert.Equal(t, project, entity.Id.Project) - assert.Equal(t, domain, entity.Id.Domain) - assert.Equal(t, name, entity.Id.Name) - assert.Equal(t, description, entity.Metadata.Description) - assert.Equal(t, resourceType, entity.ResourceType) + assert.Equal(t, project, entity.GetId().GetProject()) + assert.Equal(t, domain, entity.GetId().GetDomain()) + assert.Equal(t, name, entity.GetId().GetName()) + assert.Equal(t, description, entity.GetMetadata().GetDescription()) + assert.Equal(t, resourceType, entity.GetResourceType()) } func TestGetActiveLaunchPlanVersionFilters(t *testing.T) { @@ -505,7 +505,7 @@ func TestGetMatchableResource(t *testing.T) { } mr, err := GetMatchableResource(context.Background(), resourceManager, resourceType, project, domain, "") - assert.Equal(t, int32(12), mr.Attributes.GetWorkflowExecutionConfig().MaxParallelism) + assert.Equal(t, int32(12), mr.Attributes.GetWorkflowExecutionConfig().GetMaxParallelism()) assert.Nil(t, err) }) 
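
The mechanical change running through this patch (direct field access replaced by generated getters) buys nil-safety, not just style: protoc-gen-go emits GetX() methods that return the zero value when the receiver is nil, so a chain over optional submessages cannot dereference a nil pointer. A minimal sketch of the difference, assuming the monorepo's generated flyteidl/core package path; it is an illustration, not part of the patch:

	package main

	import (
		"fmt"

		"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"
	)

	func main() {
		// A nil closure stands in for an unset optional field on an incoming message.
		var closure *core.CompiledWorkflowClosure

		// Generated getters tolerate a nil receiver and return the zero value,
		// so the whole chain evaluates to nil instead of panicking.
		fmt.Println(closure.GetPrimary().GetTemplate().GetId()) // <nil>

		// The pre-migration form panics on the same input:
		// fmt.Println(closure.Primary.Template.Id) // nil pointer dereference
	}
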
t.Run("successful fetch workflow matchable", func(t *testing.T) { @@ -530,7 +530,7 @@ func TestGetMatchableResource(t *testing.T) { } mr, err := GetMatchableResource(context.Background(), resourceManager, resourceType, project, domain, workflow) - assert.Equal(t, int32(12), mr.Attributes.GetWorkflowExecutionConfig().MaxParallelism) + assert.Equal(t, int32(12), mr.Attributes.GetWorkflowExecutionConfig().GetMaxParallelism()) assert.Nil(t, err) }) @@ -614,7 +614,7 @@ func TestGetDescriptionEntity(t *testing.T) { }) assert.Nil(t, err) assert.NotNil(t, entity) - assert.Equal(t, "hello world", entity.ShortDescription) + assert.Equal(t, "hello world", entity.GetShortDescription()) }) t.Run("Failed to get DescriptionEntity", func(t *testing.T) { diff --git a/flyteadmin/pkg/manager/impl/util/single_task_execution.go b/flyteadmin/pkg/manager/impl/util/single_task_execution.go index 036610a9ec..ff7a8b70ce 100644 --- a/flyteadmin/pkg/manager/impl/util/single_task_execution.go +++ b/flyteadmin/pkg/manager/impl/util/single_task_execution.go @@ -48,8 +48,8 @@ func generateWorkflowNameFromTask(taskName string) string { } func generateBindings(outputs *core.VariableMap, nodeID string) []*core.Binding { - bindings := make([]*core.Binding, 0, len(outputs.Variables)) - for key := range outputs.Variables { + bindings := make([]*core.Binding, 0, len(outputs.GetVariables())) + for key := range outputs.GetVariables() { binding := &core.Binding{ Var: key, Binding: &core.BindingData{ @@ -73,16 +73,16 @@ func CreateOrGetWorkflowModel( task *admin.Task) (*models.Workflow, error) { workflowIdentifier := core.Identifier{ ResourceType: core.ResourceType_WORKFLOW, - Project: taskIdentifier.Project, - Domain: taskIdentifier.Domain, - Name: generateWorkflowNameFromTask(taskIdentifier.Name), - Version: taskIdentifier.Version, + Project: taskIdentifier.GetProject(), + Domain: taskIdentifier.GetDomain(), + Name: generateWorkflowNameFromTask(taskIdentifier.GetName()), + Version: taskIdentifier.GetVersion(), } workflowModel, err := db.WorkflowRepo().Get(ctx, repositoryInterfaces.Identifier{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, - Name: workflowIdentifier.Name, - Version: workflowIdentifier.Version, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), + Name: workflowIdentifier.GetName(), + Version: workflowIdentifier.GetVersion(), }) var retryStrategy *core.RetryStrategy @@ -100,15 +100,15 @@ func CreateOrGetWorkflowModel( workflowSpec := admin.WorkflowSpec{ Template: &core.WorkflowTemplate{ Id: &workflowIdentifier, - Interface: task.Closure.CompiledTask.Template.Interface, + Interface: task.GetClosure().GetCompiledTask().GetTemplate().GetInterface(), Nodes: []*core.Node{ { - Id: generateNodeNameFromTask(taskIdentifier.Name), + Id: generateNodeNameFromTask(taskIdentifier.GetName()), Metadata: &core.NodeMetadata{ - Name: generateNodeNameFromTask(taskIdentifier.Name), + Name: generateNodeNameFromTask(taskIdentifier.GetName()), Retries: retryStrategy, }, - Inputs: generateBindings(task.Closure.CompiledTask.Template.Interface.Inputs, noInputNodeID), + Inputs: generateBindings(task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs(), noInputNodeID), Target: &core.Node_TaskNode{ TaskNode: &core.TaskNode{ Reference: &core.TaskNode_ReferenceId{ @@ -119,7 +119,7 @@ func CreateOrGetWorkflowModel( }, }, - Outputs: generateBindings(task.Closure.CompiledTask.Template.Interface.Outputs, generateNodeNameFromTask(taskIdentifier.Name)), + Outputs: 
generateBindings(task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetOutputs(), generateNodeNameFromTask(taskIdentifier.GetName())), }, } @@ -138,9 +138,9 @@ func CreateOrGetWorkflowModel( _, err = namedEntityManager.UpdateNamedEntity(ctx, &admin.NamedEntityUpdateRequest{ ResourceType: core.ResourceType_WORKFLOW, Id: &admin.NamedEntityIdentifier{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, - Name: workflowIdentifier.Name, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), + Name: workflowIdentifier.GetName(), }, Metadata: &admin.NamedEntityMetadata{State: admin.NamedEntityState_SYSTEM_GENERATED}, }) @@ -149,10 +149,10 @@ func CreateOrGetWorkflowModel( return nil, err } workflowModel, err = db.WorkflowRepo().Get(ctx, repositoryInterfaces.Identifier{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, - Name: workflowIdentifier.Name, - Version: workflowIdentifier.Version, + Project: workflowIdentifier.GetProject(), + Domain: workflowIdentifier.GetDomain(), + Name: workflowIdentifier.GetName(), + Version: workflowIdentifier.GetVersion(), }) if err != nil { // This is unexpected - at this point we've successfully just created the skeleton workflow. @@ -171,10 +171,10 @@ func CreateOrGetLaunchPlan(ctx context.Context, var err error launchPlanIdentifier := &core.Identifier{ ResourceType: core.ResourceType_LAUNCH_PLAN, - Project: taskIdentifier.Project, - Domain: taskIdentifier.Domain, - Name: generateWorkflowNameFromTask(taskIdentifier.Name), - Version: taskIdentifier.Version, + Project: taskIdentifier.GetProject(), + Domain: taskIdentifier.GetDomain(), + Name: generateWorkflowNameFromTask(taskIdentifier.GetName()), + Version: taskIdentifier.GetVersion(), } launchPlan, err = GetLaunchPlan(ctx, db, launchPlanIdentifier) if err != nil { @@ -188,29 +188,29 @@ func CreateOrGetLaunchPlan(ctx context.Context, Spec: &admin.LaunchPlanSpec{ WorkflowId: &core.Identifier{ ResourceType: core.ResourceType_WORKFLOW, - Project: taskIdentifier.Project, - Domain: taskIdentifier.Domain, - Name: taskIdentifier.Name, - Version: taskIdentifier.Version, + Project: taskIdentifier.GetProject(), + Domain: taskIdentifier.GetDomain(), + Name: taskIdentifier.GetName(), + Version: taskIdentifier.GetVersion(), }, EntityMetadata: &admin.LaunchPlanMetadata{}, DefaultInputs: &core.ParameterMap{}, FixedInputs: &core.LiteralMap{}, Labels: &admin.Labels{}, Annotations: &admin.Annotations{}, - AuthRole: spec.AuthRole, - SecurityContext: spec.SecurityContext, + AuthRole: spec.GetAuthRole(), + SecurityContext: spec.GetSecurityContext(), }, } if err := validation.ValidateLaunchPlan(ctx, generatedCreateLaunchPlanReq, db, config.ApplicationConfiguration(), workflowInterface); err != nil { logger.Debugf(ctx, "could not create launch plan: %+v, request failed validation with err: %v", taskIdentifier, err) return nil, err } - transformedLaunchPlan := transformers.CreateLaunchPlan(generatedCreateLaunchPlanReq, workflowInterface.Outputs) + transformedLaunchPlan := transformers.CreateLaunchPlan(generatedCreateLaunchPlanReq, workflowInterface.GetOutputs()) launchPlan = transformedLaunchPlan launchPlanDigest, err := GetLaunchPlanDigest(ctx, launchPlan) if err != nil { - logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.Id, err) + logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.GetId(), err) return nil, err } launchPlanModel, err := @@ -218,7 +218,7 @@ 
func CreateOrGetLaunchPlan(ctx context.Context, if err != nil { logger.Errorf(ctx, "Failed to transform launch plan model [%+v], and workflow outputs [%+v] with err: %v", - taskIdentifier, workflowInterface.Outputs, err) + taskIdentifier, workflowInterface.GetOutputs(), err) return nil, err } err = db.LaunchPlanRepo().Create(ctx, launchPlanModel) diff --git a/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go b/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go index 13ed4a945d..d0aff9edef 100644 --- a/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go +++ b/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go @@ -88,13 +88,13 @@ func TestCreateOrGetWorkflowModel(t *testing.T) { mockNamedEntityManager := managerMocks.NamedEntityManager{} mockNamedEntityManager.UpdateNamedEntityFunc = func(ctx context.Context, request *admin.NamedEntityUpdateRequest) (*admin.NamedEntityUpdateResponse, error) { - assert.Equal(t, request.ResourceType, core.ResourceType_WORKFLOW) - assert.True(t, proto.Equal(request.Id, &admin.NamedEntityIdentifier{ + assert.Equal(t, request.GetResourceType(), core.ResourceType_WORKFLOW) + assert.True(t, proto.Equal(request.GetId(), &admin.NamedEntityIdentifier{ Project: "flytekit", Domain: "production", Name: ".flytegen.app.workflows.MyWorkflow.my_task", - }), fmt.Sprintf("%+v", request.Id)) - assert.True(t, proto.Equal(request.Metadata, &admin.NamedEntityMetadata{ + }), fmt.Sprintf("%+v", request.GetId())) + assert.True(t, proto.Equal(request.GetMetadata(), &admin.NamedEntityMetadata{ State: admin.NamedEntityState_SYSTEM_GENERATED, })) return &admin.NamedEntityUpdateResponse{}, nil @@ -102,13 +102,13 @@ func TestCreateOrGetWorkflowModel(t *testing.T) { mockWorkflowManager := managerMocks.MockWorkflowManager{} mockWorkflowManager.SetCreateCallback(func(ctx context.Context, request *admin.WorkflowCreateRequest) (*admin.WorkflowCreateResponse, error) { - assert.True(t, proto.Equal(request.Id, &core.Identifier{ + assert.True(t, proto.Equal(request.GetId(), &core.Identifier{ ResourceType: core.ResourceType_WORKFLOW, Project: "flytekit", Domain: "production", Name: ".flytegen.app.workflows.MyWorkflow.my_task", Version: "12345", - }), fmt.Sprintf("%+v", request.Id)) + }), fmt.Sprintf("%+v", request.GetId())) assert.Len(t, request.GetSpec().GetTemplate().GetNodes(), 1) assert.Equal(t, request.GetSpec().GetTemplate().GetNodes()[0].GetMetadata().GetRetries().GetRetries(), uint32(2)) @@ -220,13 +220,13 @@ func TestCreateOrGetLaunchPlan(t *testing.T) { mockNamedEntityManager := managerMocks.NamedEntityManager{} mockNamedEntityManager.UpdateNamedEntityFunc = func(ctx context.Context, request *admin.NamedEntityUpdateRequest) (*admin.NamedEntityUpdateResponse, error) { - assert.Equal(t, request.ResourceType, core.ResourceType_LAUNCH_PLAN) - assert.True(t, proto.Equal(request.Id, &admin.NamedEntityIdentifier{ + assert.Equal(t, request.GetResourceType(), core.ResourceType_LAUNCH_PLAN) + assert.True(t, proto.Equal(request.GetId(), &admin.NamedEntityIdentifier{ Project: "flytekit", Domain: "production", Name: ".flytegen.app.workflows.MyWorkflow.my_task", - }), fmt.Sprintf("%+v", request.Id)) - assert.True(t, proto.Equal(request.Metadata, &admin.NamedEntityMetadata{ + }), fmt.Sprintf("%+v", request.GetId())) + assert.True(t, proto.Equal(request.GetMetadata(), &admin.NamedEntityMetadata{ State: admin.NamedEntityState_SYSTEM_GENERATED, })) return &admin.NamedEntityUpdateResponse{}, nil @@ -256,7 +256,7 @@ func TestCreateOrGetLaunchPlan(t *testing.T) { 
Domain: "production", Name: ".flytegen.app.workflows.MyWorkflow.my_task", Version: "12345", - }, launchPlan.Id)) - assert.True(t, proto.Equal(launchPlan.Closure.ExpectedOutputs, workflowInterface.Outputs)) - assert.True(t, proto.Equal(launchPlan.Spec.AuthRole, spec.AuthRole)) + }, launchPlan.GetId())) + assert.True(t, proto.Equal(launchPlan.GetClosure().GetExpectedOutputs(), workflowInterface.GetOutputs())) + assert.True(t, proto.Equal(launchPlan.GetSpec().GetAuthRole(), spec.GetAuthRole())) } diff --git a/flyteadmin/pkg/manager/impl/validation/attributes_validator.go b/flyteadmin/pkg/manager/impl/validation/attributes_validator.go index bfaccd80a1..99929513b5 100644 --- a/flyteadmin/pkg/manager/impl/validation/attributes_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/attributes_validator.go @@ -42,15 +42,15 @@ func ValidateProjectDomainAttributesUpdateRequest(ctx context.Context, db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration, request *admin.ProjectDomainAttributesUpdateRequest) ( admin.MatchableResource, error) { - if request.Attributes == nil { + if request.GetAttributes() == nil { return defaultMatchableResource, shared.GetMissingArgumentError(shared.Attributes) } - if err := ValidateProjectAndDomain(ctx, db, config, request.Attributes.Project, request.Attributes.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetAttributes().GetProject(), request.GetAttributes().GetDomain()); err != nil { return defaultMatchableResource, err } - return validateMatchingAttributes(request.Attributes.MatchingAttributes, - fmt.Sprintf("%s-%s", request.Attributes.Project, request.Attributes.Domain)) + return validateMatchingAttributes(request.GetAttributes().GetMatchingAttributes(), + fmt.Sprintf("%s-%s", request.GetAttributes().GetProject(), request.GetAttributes().GetDomain())) } func ValidateProjectAttributesUpdateRequest(ctx context.Context, @@ -58,19 +58,19 @@ func ValidateProjectAttributesUpdateRequest(ctx context.Context, request *admin.ProjectAttributesUpdateRequest) ( admin.MatchableResource, error) { - if request.Attributes == nil { + if request.GetAttributes() == nil { return defaultMatchableResource, shared.GetMissingArgumentError(shared.Attributes) } - if err := ValidateProjectForUpdate(ctx, db, request.Attributes.Project); err != nil { + if err := ValidateProjectForUpdate(ctx, db, request.GetAttributes().GetProject()); err != nil { return defaultMatchableResource, err } - return validateMatchingAttributes(request.Attributes.MatchingAttributes, request.Attributes.Project) + return validateMatchingAttributes(request.GetAttributes().GetMatchingAttributes(), request.GetAttributes().GetProject()) } func ValidateProjectDomainAttributesGetRequest(ctx context.Context, db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration, request *admin.ProjectDomainAttributesGetRequest) error { - if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil { return err } @@ -79,7 +79,7 @@ func ValidateProjectDomainAttributesGetRequest(ctx context.Context, db repositor func ValidateProjectDomainAttributesDeleteRequest(ctx context.Context, db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration, request *admin.ProjectDomainAttributesDeleteRequest) error { - if err := ValidateProjectAndDomain(ctx, db, config, request.Project, 
request.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil { return err } @@ -89,26 +89,26 @@ func ValidateProjectDomainAttributesDeleteRequest(ctx context.Context, db reposi func ValidateWorkflowAttributesUpdateRequest(ctx context.Context, db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration, request *admin.WorkflowAttributesUpdateRequest) ( admin.MatchableResource, error) { - if request.Attributes == nil { + if request.GetAttributes() == nil { return defaultMatchableResource, shared.GetMissingArgumentError(shared.Attributes) } - if err := ValidateProjectAndDomain(ctx, db, config, request.Attributes.Project, request.Attributes.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetAttributes().GetProject(), request.GetAttributes().GetDomain()); err != nil { return defaultMatchableResource, err } - if err := ValidateEmptyStringField(request.Attributes.Workflow, shared.Name); err != nil { + if err := ValidateEmptyStringField(request.GetAttributes().GetWorkflow(), shared.Name); err != nil { return defaultMatchableResource, err } - return validateMatchingAttributes(request.Attributes.MatchingAttributes, - fmt.Sprintf("%s-%s-%s", request.Attributes.Project, request.Attributes.Domain, request.Attributes.Workflow)) + return validateMatchingAttributes(request.GetAttributes().GetMatchingAttributes(), + fmt.Sprintf("%s-%s-%s", request.GetAttributes().GetProject(), request.GetAttributes().GetDomain(), request.GetAttributes().GetWorkflow())) } func ValidateWorkflowAttributesGetRequest(ctx context.Context, db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration, request *admin.WorkflowAttributesGetRequest) error { - if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil { return err } - if err := ValidateEmptyStringField(request.Workflow, shared.Name); err != nil { + if err := ValidateEmptyStringField(request.GetWorkflow(), shared.Name); err != nil { return err } @@ -117,10 +117,10 @@ func ValidateWorkflowAttributesGetRequest(ctx context.Context, db repositoryInte func ValidateWorkflowAttributesDeleteRequest(ctx context.Context, db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration, request *admin.WorkflowAttributesDeleteRequest) error { - if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil { return err } - if err := ValidateEmptyStringField(request.Workflow, shared.Name); err != nil { + if err := ValidateEmptyStringField(request.GetWorkflow(), shared.Name); err != nil { return err } @@ -128,7 +128,7 @@ func ValidateWorkflowAttributesDeleteRequest(ctx context.Context, db repositoryI } func ValidateListAllMatchableAttributesRequest(request *admin.ListMatchableAttributesRequest) error { - if _, ok := admin.MatchableResource_name[int32(request.ResourceType)]; !ok { + if _, ok := admin.MatchableResource_name[int32(request.GetResourceType())]; !ok { return shared.GetInvalidArgumentError(shared.ResourceType) } return nil diff --git a/flyteadmin/pkg/manager/impl/validation/execution_validator.go b/flyteadmin/pkg/manager/impl/validation/execution_validator.go index f5fd30598a..c9f357b525 100644 --- 
a/flyteadmin/pkg/manager/impl/validation/execution_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/execution_validator.go @@ -28,47 +28,47 @@ var acceptedReferenceLaunchTypes = map[core.ResourceType]interface{}{ func ValidateExecutionRequest(ctx context.Context, request *admin.ExecutionCreateRequest, db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration) error { - if err := ValidateEmptyStringField(request.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(request.GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(request.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(request.GetDomain(), shared.Domain); err != nil { return err } - if request.Name != "" { - if err := CheckValidExecutionID(strings.ToLower(request.Name), shared.Name); err != nil { + if request.GetName() != "" { + if err := CheckValidExecutionID(strings.ToLower(request.GetName()), shared.Name); err != nil { return err } } - if len(request.Name) > allowedExecutionNameLength { + if len(request.GetName()) > allowedExecutionNameLength { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "name for ExecutionCreateRequest [%+v] exceeded allowed length %d", request, allowedExecutionNameLength) } - if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil { return err } - if request.Spec == nil { + if request.GetSpec() == nil { return shared.GetMissingArgumentError(shared.Spec) } // TODO(katrogan): Change the name of Spec.LaunchPlan to something more generic to permit reference Tasks. // https://github.com/flyteorg/flyte/issues/262 - if err := ValidateIdentifierFieldsSet(request.Spec.LaunchPlan); err != nil { + if err := ValidateIdentifierFieldsSet(request.GetSpec().GetLaunchPlan()); err != nil { return err } - if _, ok := acceptedReferenceLaunchTypes[request.Spec.LaunchPlan.ResourceType]; !ok { + if _, ok := acceptedReferenceLaunchTypes[request.GetSpec().GetLaunchPlan().GetResourceType()]; !ok { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "Invalid reference entity resource type [%v], only [%+v] allowed", - request.Spec.LaunchPlan.ResourceType, acceptedReferenceLaunchTypes) + request.GetSpec().GetLaunchPlan().GetResourceType(), acceptedReferenceLaunchTypes) } - if err := validateLiteralMap(request.Inputs, shared.Inputs); err != nil { + if err := validateLiteralMap(request.GetInputs(), shared.Inputs); err != nil { return err } - if request.Spec.GetNotifications() != nil { - if err := validateNotifications(request.Spec.GetNotifications().Notifications); err != nil { + if request.GetSpec().GetNotifications() != nil { + if err := validateNotifications(request.GetSpec().GetNotifications().GetNotifications()); err != nil { return err } } - if err := validateLabels(request.Spec.Labels); err != nil { + if err := validateLabels(request.GetSpec().GetLabels()); err != nil { return err } return nil @@ -140,14 +140,14 @@ func CheckValidExecutionID(executionID, fieldName string) error { } func ValidateCreateWorkflowEventRequest(request *admin.WorkflowExecutionEventRequest, maxOutputSizeInBytes int64) error { - if request.Event == nil { + if request.GetEvent() == nil { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "Workflow event handler was called without event") - } else if request.Event.ExecutionId == nil { + } else if 
request.GetEvent().GetExecutionId() == nil { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "Workflow event handler request event doesn't have an execution id - %v", request.Event) + "Workflow event handler request event doesn't have an execution id - %v", request.GetEvent()) } - if err := ValidateOutputData(request.Event.GetOutputData(), maxOutputSizeInBytes); err != nil { + if err := ValidateOutputData(request.GetEvent().GetOutputData(), maxOutputSizeInBytes); err != nil { return err } return nil @@ -157,13 +157,13 @@ func ValidateWorkflowExecutionIdentifier(identifier *core.WorkflowExecutionIdent if identifier == nil { return shared.GetMissingArgumentError(shared.ID) } - if err := ValidateEmptyStringField(identifier.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(identifier.GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(identifier.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(identifier.GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateEmptyStringField(identifier.Name, shared.Name); err != nil { + if err := ValidateEmptyStringField(identifier.GetName(), shared.Name); err != nil { return err } return nil diff --git a/flyteadmin/pkg/manager/impl/validation/execution_validator_test.go b/flyteadmin/pkg/manager/impl/validation/execution_validator_test.go index 943e5006e7..89e97370fa 100644 --- a/flyteadmin/pkg/manager/impl/validation/execution_validator_test.go +++ b/flyteadmin/pkg/manager/impl/validation/execution_validator_test.go @@ -92,9 +92,9 @@ func TestGetExecutionInputs(t *testing.T) { lpRequest := testutils.GetLaunchPlanRequest() actualInputs, err := CheckAndFetchInputsForExecution( - executionRequest.Inputs, - lpRequest.Spec.FixedInputs, - lpRequest.Spec.DefaultInputs, + executionRequest.GetInputs(), + lpRequest.GetSpec().GetFixedInputs(), + lpRequest.GetSpec().GetDefaultInputs(), ) expectedMap := &core.LiteralMap{ Literals: map[string]*core.Literal{ @@ -125,9 +125,9 @@ func TestGetExecutionWithOffloadedInputs(t *testing.T) { lpRequest := testutils.GetLaunchPlanRequest() actualInputs, err := CheckAndFetchInputsForExecution( - executionRequest.Inputs, - lpRequest.Spec.FixedInputs, - lpRequest.Spec.DefaultInputs, + executionRequest.GetInputs(), + lpRequest.GetSpec().GetFixedInputs(), + lpRequest.GetSpec().GetDefaultInputs(), ) expectedMap := core.LiteralMap{ Literals: map[string]*core.Literal{ @@ -137,8 +137,8 @@ func TestGetExecutionWithOffloadedInputs(t *testing.T) { } assert.Nil(t, err) assert.NotNil(t, actualInputs) - assert.EqualValues(t, expectedMap.GetLiterals()["foo"], actualInputs.Literals["foo"]) - assert.EqualValues(t, expectedMap.GetLiterals()["bar"], actualInputs.Literals["bar"]) + assert.EqualValues(t, expectedMap.GetLiterals()["foo"], actualInputs.GetLiterals()["foo"]) + assert.EqualValues(t, expectedMap.GetLiterals()["bar"], actualInputs.GetLiterals()["bar"]) } func TestValidateExecInputsWrongType(t *testing.T) { @@ -150,9 +150,9 @@ func TestValidateExecInputsWrongType(t *testing.T) { }, } _, err := CheckAndFetchInputsForExecution( - executionRequest.Inputs, - lpRequest.Spec.FixedInputs, - lpRequest.Spec.DefaultInputs, + executionRequest.GetInputs(), + lpRequest.GetSpec().GetFixedInputs(), + lpRequest.GetSpec().GetDefaultInputs(), ) utils.AssertEqualWithSanitizedRegex(t, "invalid foo input wrong type. 
Expected simple:STRING, but got simple:INTEGER", err.Error()) } @@ -167,9 +167,9 @@ func TestValidateExecInputsExtraInputs(t *testing.T) { }, } _, err := CheckAndFetchInputsForExecution( - executionRequest.Inputs, - lpRequest.Spec.FixedInputs, - lpRequest.Spec.DefaultInputs, + executionRequest.GetInputs(), + lpRequest.GetSpec().GetFixedInputs(), + lpRequest.GetSpec().GetDefaultInputs(), ) assert.EqualError(t, err, "invalid input foo-extra") } @@ -184,9 +184,9 @@ func TestValidateExecInputsOverrideFixed(t *testing.T) { }, } _, err := CheckAndFetchInputsForExecution( - executionRequest.Inputs, - lpRequest.Spec.FixedInputs, - lpRequest.Spec.DefaultInputs, + executionRequest.GetInputs(), + lpRequest.GetSpec().GetFixedInputs(), + lpRequest.GetSpec().GetDefaultInputs(), ) assert.EqualError(t, err, "invalid input bar") } @@ -196,9 +196,9 @@ func TestValidateExecEmptyInputs(t *testing.T) { lpRequest := testutils.GetLaunchPlanRequest() executionRequest.Inputs = nil actualInputs, err := CheckAndFetchInputsForExecution( - executionRequest.Inputs, - lpRequest.Spec.FixedInputs, - lpRequest.Spec.DefaultInputs, + executionRequest.GetInputs(), + lpRequest.GetSpec().GetFixedInputs(), + lpRequest.GetSpec().GetDefaultInputs(), ) expectedMap := &core.LiteralMap{ Literals: map[string]*core.Literal{ diff --git a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator.go b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator.go index 2a49b4da87..0308faceba 100644 --- a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator.go @@ -3,6 +3,7 @@ package validation import ( "context" + "github.com/robfig/cron/v3" "google.golang.org/grpc/codes" "github.com/flyteorg/flyte/flyteadmin/pkg/common" @@ -18,36 +19,36 @@ import ( func ValidateLaunchPlan(ctx context.Context, request *admin.LaunchPlanCreateRequest, db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration, workflowInterface *core.TypedInterface) error { - if err := ValidateIdentifier(request.Id, common.LaunchPlan); err != nil { + if err := ValidateIdentifier(request.GetId(), common.LaunchPlan); err != nil { return err } - if err := ValidateProjectAndDomain(ctx, db, config, request.Id.Project, request.Id.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetId().GetProject(), request.GetId().GetDomain()); err != nil { return err } - if request.Spec == nil { + if request.GetSpec() == nil { return shared.GetMissingArgumentError(shared.Spec) } - if err := ValidateIdentifier(request.Spec.WorkflowId, common.Workflow); err != nil { + if err := ValidateIdentifier(request.GetSpec().GetWorkflowId(), common.Workflow); err != nil { return err } - if err := validateLabels(request.Spec.Labels); err != nil { + if err := validateLabels(request.GetSpec().GetLabels()); err != nil { return err } - if err := validateLiteralMap(request.Spec.FixedInputs, shared.FixedInputs); err != nil { + if err := validateLiteralMap(request.GetSpec().GetFixedInputs(), shared.FixedInputs); err != nil { return err } if config.GetTopLevelConfig().FeatureGates.EnableArtifacts { - if err := validateParameterMapAllowArtifacts(request.Spec.DefaultInputs, shared.DefaultInputs); err != nil { + if err := validateParameterMapAllowArtifacts(request.GetSpec().GetDefaultInputs(), shared.DefaultInputs); err != nil { return err } } else { - if err := validateParameterMapDisableArtifacts(request.Spec.DefaultInputs, shared.DefaultInputs); err != nil { + if err := 
validateParameterMapDisableArtifacts(request.GetSpec().GetDefaultInputs(), shared.DefaultInputs); err != nil { return err } } - expectedInputs, err := checkAndFetchExpectedInputForLaunchPlan(workflowInterface.GetInputs(), request.Spec.FixedInputs, request.Spec.DefaultInputs) + expectedInputs, err := checkAndFetchExpectedInputForLaunchPlan(workflowInterface.GetInputs(), request.GetSpec().GetFixedInputs(), request.GetSpec().GetDefaultInputs()) if err != nil { return err } @@ -57,8 +58,8 @@ func ValidateLaunchPlan(ctx context.Context, // Augment default inputs with the unbound workflow inputs. request.Spec.DefaultInputs = expectedInputs - if request.Spec.EntityMetadata != nil { - if err := validateNotifications(request.Spec.EntityMetadata.Notifications); err != nil { + if request.GetSpec().GetEntityMetadata() != nil { + if err := validateNotifications(request.GetSpec().GetEntityMetadata().GetNotifications()); err != nil { return err } if request.GetSpec().GetEntityMetadata().GetLaunchConditions() != nil { @@ -73,7 +74,7 @@ func ValidateLaunchPlan(ctx context.Context, func validateSchedule(request *admin.LaunchPlanCreateRequest, expectedInputs *core.ParameterMap) error { schedule := request.GetSpec().GetEntityMetadata().GetSchedule() if schedule.GetCronExpression() != "" || schedule.GetRate() != nil || schedule.GetCronSchedule() != nil { - for key, value := range expectedInputs.Parameters { + for key, value := range expectedInputs.GetParameters() { if value.GetRequired() && key != schedule.GetKickoffTimeInputArg() { return errors.NewFlyteAdminErrorf( codes.InvalidArgument, @@ -81,7 +82,7 @@ func validateSchedule(request *admin.LaunchPlanCreateRequest, expectedInputs *co } } if schedule.GetKickoffTimeInputArg() != "" { - if param, ok := expectedInputs.Parameters[schedule.GetKickoffTimeInputArg()]; !ok { + if param, ok := expectedInputs.GetParameters()[schedule.GetKickoffTimeInputArg()]; !ok { return errors.NewFlyteAdminErrorf( codes.InvalidArgument, "Cannot create a schedule with a KickoffTimeInputArg that does not point to a free input. [%v] is not free or does not exist.", schedule.GetKickoffTimeInputArg()) @@ -91,6 +92,19 @@ func validateSchedule(request *admin.LaunchPlanCreateRequest, expectedInputs *co "KickoffTimeInputArg must reference a datetime input. 
[%v] is a [%v]", schedule.GetKickoffTimeInputArg(), param.GetVar().GetType()) } } + + // validate cron expression + var cronExpression string + if schedule.GetCronExpression() != "" { + cronExpression = schedule.GetCronExpression() + } else if schedule.GetCronSchedule() != nil { + cronExpression = schedule.GetCronSchedule().GetSchedule() + } + if cronExpression != "" { + if _, err := cron.ParseStandard(cronExpression); err != nil { + return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "Invalid cron expression: %v", err) + } + } } return nil } @@ -111,7 +125,7 @@ func checkAndFetchExpectedInputForLaunchPlan( } // If there are no inputs that the workflow requires, there should be none at launch plan as well - if workflowVariableMap == nil || len(workflowVariableMap.Variables) == 0 { + if workflowVariableMap == nil || len(workflowVariableMap.GetVariables()) == 0 { if len(defaultInputMap) > 0 { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid launch plan default inputs, expected none but found %d", len(defaultInputMap)) @@ -125,7 +139,7 @@ func checkAndFetchExpectedInputForLaunchPlan( }, nil } - workflowExpectedInputMap = workflowVariableMap.Variables + workflowExpectedInputMap = workflowVariableMap.GetVariables() for name, defaultInput := range defaultInputMap { value, ok := workflowExpectedInputMap[name] if !ok { diff --git a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go index ab2832eeeb..8dee3e3cca 100644 --- a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go +++ b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go @@ -20,7 +20,7 @@ const ( var lpApplicationConfig = testutils.GetApplicationConfigWithDefaultDomains() func getWorkflowInterface() *core.TypedInterface { - return testutils.GetSampleWorkflowSpecForTest().Template.Interface + return testutils.GetSampleWorkflowSpecForTest().GetTemplate().GetInterface() } func TestValidateLpEmptyProject(t *testing.T) { @@ -358,7 +358,7 @@ func TestValidateSchedule_ArgNotFixed(t *testing.T) { }, } t.Run("with deprecated cron expression", func(t *testing.T) { - request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * * *") + request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * *") err := validateSchedule(request, inputMap) assert.NotNil(t, err) @@ -370,7 +370,7 @@ func TestValidateSchedule_ArgNotFixed(t *testing.T) { assert.NotNil(t, err) }) t.Run("with cron schedule", func(t *testing.T) { - request := testutils.GetLaunchPlanRequestWithCronSchedule("* * * * * *") + request := testutils.GetLaunchPlanRequestWithCronSchedule("* * * * *") err := validateSchedule(request, inputMap) assert.NotNil(t, err) @@ -378,7 +378,7 @@ func TestValidateSchedule_ArgNotFixed(t *testing.T) { } func TestValidateSchedule_KickoffTimeArgDoesNotExist(t *testing.T) { - request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * * *") + request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * *") inputMap := &core.ParameterMap{ Parameters: map[string]*core.Parameter{}, } @@ -389,7 +389,7 @@ func TestValidateSchedule_KickoffTimeArgDoesNotExist(t *testing.T) { } func TestValidateSchedule_KickoffTimeArgPointsAtWrongType(t *testing.T) { - request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * * *") + request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * *") inputMap := &core.ParameterMap{ Parameters: 
map[string]*core.Parameter{ foo: { @@ -409,7 +409,7 @@ func TestValidateSchedule_KickoffTimeArgPointsAtWrongType(t *testing.T) { } func TestValidateSchedule_NoRequired(t *testing.T) { - request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * * *") + request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * *") inputMap := &core.ParameterMap{ Parameters: map[string]*core.Parameter{ foo: { @@ -428,7 +428,7 @@ func TestValidateSchedule_NoRequired(t *testing.T) { } func TestValidateSchedule_KickoffTimeBound(t *testing.T) { - request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * * *") + request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * *") inputMap := &core.ParameterMap{ Parameters: map[string]*core.Parameter{ foo: { @@ -446,3 +446,34 @@ func TestValidateSchedule_KickoffTimeBound(t *testing.T) { err := validateSchedule(request, inputMap) assert.Nil(t, err) } + +func TestValidateSchedule_InvalidCronExpression(t *testing.T) { + inputMap := &core.ParameterMap{ + Parameters: map[string]*core.Parameter{ + foo: { + Var: &core.Variable{ + Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_DATETIME}}, + }, + Behavior: &core.Parameter_Required{ + Required: true, + }, + }, + }, + } + + t.Run("with unsupported cron special characters on deprecated cron schedule: #", func(t *testing.T) { + request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * MON#1") + request.Spec.EntityMetadata.Schedule.KickoffTimeInputArg = foo + + err := validateSchedule(request, inputMap) + assert.NotNil(t, err) + }) + + t.Run("with unsupported cron special characters: #", func(t *testing.T) { + request := testutils.GetLaunchPlanRequestWithCronSchedule("* * * * MON#1") + request.Spec.EntityMetadata.Schedule.KickoffTimeInputArg = foo + + err := validateSchedule(request, inputMap) + assert.NotNil(t, err) + }) +} diff --git a/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go b/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go index e9af05f527..3b8fb6963e 100644 --- a/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go @@ -13,46 +13,46 @@ import ( var archivableResourceTypes = sets.NewInt32(int32(core.ResourceType_WORKFLOW), int32(core.ResourceType_TASK), int32(core.ResourceType_LAUNCH_PLAN)) func ValidateNamedEntityGetRequest(request *admin.NamedEntityGetRequest) error { - if err := ValidateResourceType(request.ResourceType); err != nil { + if err := ValidateResourceType(request.GetResourceType()); err != nil { return err } - if err := ValidateNamedEntityIdentifier(request.Id); err != nil { + if err := ValidateNamedEntityIdentifier(request.GetId()); err != nil { return err } return nil } func ValidateNamedEntityUpdateRequest(request *admin.NamedEntityUpdateRequest) error { - if err := ValidateResourceType(request.ResourceType); err != nil { + if err := ValidateResourceType(request.GetResourceType()); err != nil { return err } - if err := ValidateNamedEntityIdentifier(request.Id); err != nil { + if err := ValidateNamedEntityIdentifier(request.GetId()); err != nil { return err } - if request.Metadata == nil { + if request.GetMetadata() == nil { return shared.GetMissingArgumentError(shared.Metadata) } // Only tasks and workflow resources can be modified from the default state. 
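
A note on the cron check introduced above in launch_plan_validator.go: cron.ParseStandard from robfig/cron/v3 accepts the standard five-field expression (plus descriptors such as @daily) and rejects the six-field and Quartz-style '#' forms that the updated tests now exercise. A small standalone sketch of that behavior; the @daily descriptor is added for illustration, the other expressions come from the tests:

	package main

	import (
		"fmt"

		"github.com/robfig/cron/v3"
	)

	func main() {
		for _, expr := range []string{
			"* * * * *",     // five fields: accepted by ParseStandard
			"@daily",        // predefined descriptor: accepted
			"* * * * * *",   // six fields (seconds): rejected as non-standard
			"* * * * MON#1", // '#' (nth weekday) is a Quartz extension: rejected
		} {
			_, err := cron.ParseStandard(expr)
			fmt.Printf("%-16q valid=%t err=%v\n", expr, err == nil, err)
		}
	}
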
- if request.Metadata.State != admin.NamedEntityState_NAMED_ENTITY_ACTIVE && - !archivableResourceTypes.Has(int32(request.ResourceType)) { + if request.GetMetadata().GetState() != admin.NamedEntityState_NAMED_ENTITY_ACTIVE && + !archivableResourceTypes.Has(int32(request.GetResourceType())) { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "Resource [%s] cannot have its state updated", request.ResourceType.String()) + "Resource [%s] cannot have its state updated", request.GetResourceType().String()) } return nil } func ValidateNamedEntityListRequest(request *admin.NamedEntityListRequest) error { - if err := ValidateEmptyStringField(request.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(request.GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(request.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(request.GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateResourceType(request.ResourceType); err != nil { + if err := ValidateResourceType(request.GetResourceType()); err != nil { return err } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil diff --git a/flyteadmin/pkg/manager/impl/validation/node_execution_validator.go b/flyteadmin/pkg/manager/impl/validation/node_execution_validator.go index 9ac920d143..c48dde85cd 100644 --- a/flyteadmin/pkg/manager/impl/validation/node_execution_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/node_execution_validator.go @@ -11,66 +11,66 @@ func ValidateNodeExecutionIdentifier(identifier *core.NodeExecutionIdentifier) e if identifier == nil { return shared.GetMissingArgumentError(shared.ID) } - if identifier.ExecutionId == nil { + if identifier.GetExecutionId() == nil { return shared.GetMissingArgumentError(shared.ExecutionID) } - if identifier.NodeId == "" { + if identifier.GetNodeId() == "" { return shared.GetMissingArgumentError(shared.NodeID) } - return ValidateWorkflowExecutionIdentifier(identifier.ExecutionId) + return ValidateWorkflowExecutionIdentifier(identifier.GetExecutionId()) } // Validates that NodeExecutionEventRequests handled by admin include a valid node execution identifier. // In the case the event specifies a DynamicWorkflow in the TaskNodeMetadata, this method also validates the contents of // the dynamic workflow. 
func ValidateNodeExecutionEventRequest(request *admin.NodeExecutionEventRequest, maxOutputSizeInBytes int64) error { - if request.Event == nil { + if request.GetEvent() == nil { return shared.GetMissingArgumentError(shared.Event) } - err := ValidateNodeExecutionIdentifier(request.Event.Id) + err := ValidateNodeExecutionIdentifier(request.GetEvent().GetId()) if err != nil { return err } - if request.Event.GetTaskNodeMetadata() != nil && request.Event.GetTaskNodeMetadata().DynamicWorkflow != nil { - dynamicWorkflowNodeMetadata := request.Event.GetTaskNodeMetadata().DynamicWorkflow - if err := ValidateIdentifier(dynamicWorkflowNodeMetadata.Id, common.Workflow); err != nil { + if request.GetEvent().GetTaskNodeMetadata() != nil && request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow() != nil { + dynamicWorkflowNodeMetadata := request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow() + if err := ValidateIdentifier(dynamicWorkflowNodeMetadata.GetId(), common.Workflow); err != nil { return err } - if dynamicWorkflowNodeMetadata.CompiledWorkflow == nil { + if dynamicWorkflowNodeMetadata.GetCompiledWorkflow() == nil { return shared.GetMissingArgumentError("compiled dynamic workflow") } - if dynamicWorkflowNodeMetadata.CompiledWorkflow.Primary == nil { + if dynamicWorkflowNodeMetadata.GetCompiledWorkflow().GetPrimary() == nil { return shared.GetMissingArgumentError("primary dynamic workflow") } - if dynamicWorkflowNodeMetadata.CompiledWorkflow.Primary.Template == nil { + if dynamicWorkflowNodeMetadata.GetCompiledWorkflow().GetPrimary().GetTemplate() == nil { return shared.GetMissingArgumentError("primary dynamic workflow template") } - if err := ValidateIdentifier(dynamicWorkflowNodeMetadata.CompiledWorkflow.Primary.Template.Id, common.Workflow); err != nil { + if err := ValidateIdentifier(dynamicWorkflowNodeMetadata.GetCompiledWorkflow().GetPrimary().GetTemplate().GetId(), common.Workflow); err != nil { return err } } - if err := ValidateOutputData(request.Event.GetOutputData(), maxOutputSizeInBytes); err != nil { + if err := ValidateOutputData(request.GetEvent().GetOutputData(), maxOutputSizeInBytes); err != nil { return err } return nil } func ValidateNodeExecutionListRequest(request *admin.NodeExecutionListRequest) error { - if err := ValidateWorkflowExecutionIdentifier(request.WorkflowExecutionId); err != nil { + if err := ValidateWorkflowExecutionIdentifier(request.GetWorkflowExecutionId()); err != nil { return shared.GetMissingArgumentError(shared.ExecutionID) } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil } func ValidateNodeExecutionForTaskListRequest(request *admin.NodeExecutionForTaskListRequest) error { - if err := ValidateTaskExecutionIdentifier(request.TaskExecutionId); err != nil { + if err := ValidateTaskExecutionIdentifier(request.GetTaskExecutionId()); err != nil { return err } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil diff --git a/flyteadmin/pkg/manager/impl/validation/notifications_validator.go b/flyteadmin/pkg/manager/impl/validation/notifications_validator.go index 8b8f9a68e7..6dfad3a166 100644 --- a/flyteadmin/pkg/manager/impl/validation/notifications_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/notifications_validator.go @@ -23,22 +23,22 @@ func validateNotifications(notifications []*admin.Notification) error { for _, notif := range notifications { switch { case 
notif.GetEmail() != nil: - if err := validateRecipientsEmail(notif.GetEmail().RecipientsEmail); err != nil { + if err := validateRecipientsEmail(notif.GetEmail().GetRecipientsEmail()); err != nil { return err } case notif.GetSlack() != nil: - if err := validateRecipientsEmail(notif.GetSlack().RecipientsEmail); err != nil { + if err := validateRecipientsEmail(notif.GetSlack().GetRecipientsEmail()); err != nil { return err } case notif.GetPagerDuty() != nil: - if err := validateRecipientsEmail(notif.GetPagerDuty().RecipientsEmail); err != nil { + if err := validateRecipientsEmail(notif.GetPagerDuty().GetRecipientsEmail()); err != nil { return err } default: return shared.GetInvalidArgumentError("notification type") } - for _, phase := range notif.Phases { + for _, phase := range notif.GetPhases() { if !common.IsExecutionTerminal(phase) { return shared.GetInvalidArgumentError("phase") } diff --git a/flyteadmin/pkg/manager/impl/validation/project_validator.go b/flyteadmin/pkg/manager/impl/validation/project_validator.go index 76bab900c1..fbdd6a0ca2 100644 --- a/flyteadmin/pkg/manager/impl/validation/project_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/project_validator.go @@ -21,40 +21,40 @@ const maxDescriptionLength = 300 const maxLabelArrayLength = 16 func ValidateProjectRegisterRequest(request *admin.ProjectRegisterRequest) error { - if request.Project == nil { + if request.GetProject() == nil { return shared.GetMissingArgumentError(shared.Project) } - project := request.Project - if err := ValidateEmptyStringField(project.Name, projectName); err != nil { + project := request.GetProject() + if err := ValidateEmptyStringField(project.GetName(), projectName); err != nil { return err } return ValidateProject(project) } func ValidateProjectGetRequest(request *admin.ProjectGetRequest) error { - if err := ValidateEmptyStringField(request.Id, projectID); err != nil { + if err := ValidateEmptyStringField(request.GetId(), projectID); err != nil { return err } return nil } func ValidateProject(project *admin.Project) error { - if err := ValidateEmptyStringField(project.Id, projectID); err != nil { + if err := ValidateEmptyStringField(project.GetId(), projectID); err != nil { return err } - if err := validateLabels(project.Labels); err != nil { + if err := validateLabels(project.GetLabels()); err != nil { return err } - if errs := validation.IsDNS1123Label(project.Id); len(errs) > 0 { - return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid project id [%s]: %v", project.Id, errs) + if errs := validation.IsDNS1123Label(project.GetId()); len(errs) > 0 { + return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid project id [%s]: %v", project.GetId(), errs) } - if err := ValidateMaxLengthStringField(project.Name, projectName, maxNameLength); err != nil { + if err := ValidateMaxLengthStringField(project.GetName(), projectName, maxNameLength); err != nil { return err } - if err := ValidateMaxLengthStringField(project.Description, projectDescription, maxDescriptionLength); err != nil { + if err := ValidateMaxLengthStringField(project.GetDescription(), projectDescription, maxDescriptionLength); err != nil { return err } - if project.Domains != nil { + if project.GetDomains() != nil { return errors.NewFlyteAdminError(codes.InvalidArgument, "Domains are currently only set system wide. 
Please retry without domains included in your request.") } diff --git a/flyteadmin/pkg/manager/impl/validation/shared_execution.go b/flyteadmin/pkg/manager/impl/validation/shared_execution.go index 07e2a26fb0..1ee17d1b8c 100644 --- a/flyteadmin/pkg/manager/impl/validation/shared_execution.go +++ b/flyteadmin/pkg/manager/impl/validation/shared_execution.go @@ -14,9 +14,9 @@ import ( // ValidateClusterForExecutionID validates that the execution denoted by executionId is recorded as executing on `cluster`. func ValidateClusterForExecutionID(ctx context.Context, db repoInterfaces.Repository, executionID *core.WorkflowExecutionIdentifier, clusterInEvent string) error { workflowExecution, err := db.ExecutionRepo().Get(ctx, repoInterfaces.Identifier{ - Project: executionID.Project, - Domain: executionID.Domain, - Name: executionID.Name, + Project: executionID.GetProject(), + Domain: executionID.GetDomain(), + Name: executionID.GetName(), }) if err != nil { logger.Debugf(ctx, "Failed to find existing execution with id [%+v] with err: %v", executionID, err) diff --git a/flyteadmin/pkg/manager/impl/validation/signal_validator.go b/flyteadmin/pkg/manager/impl/validation/signal_validator.go index af1d4425aa..0ba2d3b704 100644 --- a/flyteadmin/pkg/manager/impl/validation/signal_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/signal_validator.go @@ -15,13 +15,13 @@ import ( ) func ValidateSignalGetOrCreateRequest(ctx context.Context, request *admin.SignalGetOrCreateRequest) error { - if request.Id == nil { + if request.GetId() == nil { return shared.GetMissingArgumentError("id") } - if err := ValidateSignalIdentifier(request.Id); err != nil { + if err := ValidateSignalIdentifier(request.GetId()); err != nil { return err } - if request.Type == nil { + if request.GetType() == nil { return shared.GetMissingArgumentError("type") } @@ -29,39 +29,39 @@ func ValidateSignalGetOrCreateRequest(ctx context.Context, request *admin.Signal } func ValidateSignalIdentifier(identifier *core.SignalIdentifier) error { - if identifier.ExecutionId == nil { + if identifier.GetExecutionId() == nil { return shared.GetMissingArgumentError(shared.ExecutionID) } - if identifier.SignalId == "" { + if identifier.GetSignalId() == "" { return shared.GetMissingArgumentError("signal_id") } - return ValidateWorkflowExecutionIdentifier(identifier.ExecutionId) + return ValidateWorkflowExecutionIdentifier(identifier.GetExecutionId()) } func ValidateSignalListRequest(ctx context.Context, request *admin.SignalListRequest) error { - if err := ValidateWorkflowExecutionIdentifier(request.WorkflowExecutionId); err != nil { + if err := ValidateWorkflowExecutionIdentifier(request.GetWorkflowExecutionId()); err != nil { return shared.GetMissingArgumentError(shared.ExecutionID) } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil } func ValidateSignalSetRequest(ctx context.Context, db repositoryInterfaces.Repository, request *admin.SignalSetRequest) error { - if request.Id == nil { + if request.GetId() == nil { return shared.GetMissingArgumentError("id") } - if err := ValidateSignalIdentifier(request.Id); err != nil { + if err := ValidateSignalIdentifier(request.GetId()); err != nil { return err } - if request.Value == nil { + if request.GetValue() == nil { return shared.GetMissingArgumentError("value") } // validate that signal value matches type of existing signal - signalModel, err := transformers.CreateSignalModel(request.Id, nil, nil) + 
signalModel, err := transformers.CreateSignalModel(request.GetId(), nil, nil) if err != nil { return nil } @@ -71,7 +71,7 @@ func ValidateSignalSetRequest(ctx context.Context, db repositoryInterfaces.Repos "failed to validate that signal [%v] exists, err: [%+v]", signalModel.SignalKey, err) } - valueType := propellervalidators.LiteralTypeForLiteral(request.Value) + valueType := propellervalidators.LiteralTypeForLiteral(request.GetValue()) lookupSignal, err := transformers.FromSignalModel(lookupSignalModel) if err != nil { return err @@ -80,10 +80,10 @@ func ValidateSignalSetRequest(ctx context.Context, db repositoryInterfaces.Repos if err != nil { return errors.NewInvalidLiteralTypeError("", err) } - if !propellervalidators.AreTypesCastable(lookupSignal.Type, valueType) { + if !propellervalidators.AreTypesCastable(lookupSignal.GetType(), valueType) { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "requested signal value [%v] is not castable to existing signal type [%v]", - request.Value, lookupSignalModel.Type) + request.GetValue(), lookupSignalModel.Type) } return nil diff --git a/flyteadmin/pkg/manager/impl/validation/task_execution_validator.go b/flyteadmin/pkg/manager/impl/validation/task_execution_validator.go index dfe80541b0..dee4b86c3b 100644 --- a/flyteadmin/pkg/manager/impl/validation/task_execution_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/task_execution_validator.go @@ -8,20 +8,20 @@ import ( ) func ValidateTaskExecutionRequest(request *admin.TaskExecutionEventRequest, maxOutputSizeInBytes int64) error { - if request.Event == nil { + if request.GetEvent() == nil { return shared.GetMissingArgumentError(shared.Event) } - if request.Event.OccurredAt == nil { + if request.GetEvent().GetOccurredAt() == nil { return shared.GetMissingArgumentError(shared.OccurredAt) } - if err := ValidateOutputData(request.Event.GetOutputData(), maxOutputSizeInBytes); err != nil { + if err := ValidateOutputData(request.GetEvent().GetOutputData(), maxOutputSizeInBytes); err != nil { return err } return ValidateTaskExecutionIdentifier(&core.TaskExecutionIdentifier{ - TaskId: request.Event.TaskId, - NodeExecutionId: request.Event.ParentNodeExecutionId, - RetryAttempt: request.Event.RetryAttempt, + TaskId: request.GetEvent().GetTaskId(), + NodeExecutionId: request.GetEvent().GetParentNodeExecutionId(), + RetryAttempt: request.GetEvent().GetRetryAttempt(), }) } @@ -29,19 +29,19 @@ func ValidateTaskExecutionIdentifier(identifier *core.TaskExecutionIdentifier) e if identifier == nil { return shared.GetMissingArgumentError(shared.ID) } - if identifier.NodeExecutionId == nil { + if identifier.GetNodeExecutionId() == nil { return shared.GetMissingArgumentError(shared.NodeExecutionID) } - if err := ValidateNodeExecutionIdentifier(identifier.NodeExecutionId); err != nil { + if err := ValidateNodeExecutionIdentifier(identifier.GetNodeExecutionId()); err != nil { return err } - if identifier.TaskId == nil { + if identifier.GetTaskId() == nil { return shared.GetMissingArgumentError(shared.TaskID) } - if err := ValidateIdentifier(identifier.TaskId, common.Task); err != nil { + if err := ValidateIdentifier(identifier.GetTaskId(), common.Task); err != nil { return err } @@ -49,10 +49,10 @@ func ValidateTaskExecutionIdentifier(identifier *core.TaskExecutionIdentifier) e } func ValidateTaskExecutionListRequest(request *admin.TaskExecutionListRequest) error { - if err := ValidateNodeExecutionIdentifier(request.NodeExecutionId); err != nil { + if err := 
ValidateNodeExecutionIdentifier(request.GetNodeExecutionId()); err != nil { return err } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil diff --git a/flyteadmin/pkg/manager/impl/validation/task_validator.go b/flyteadmin/pkg/manager/impl/validation/task_validator.go index 0f0f86fb0b..991048d97e 100644 --- a/flyteadmin/pkg/manager/impl/validation/task_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/task_validator.go @@ -26,17 +26,17 @@ var whitelistedTaskErr = errors.NewFlyteAdminErrorf(codes.InvalidArgument, "task // This is called for a task with a non-nil container. func validateContainer(task *core.TaskTemplate, platformTaskResources workflowengineInterfaces.TaskResources) error { - if err := ValidateEmptyStringField(task.GetContainer().Image, shared.Image); err != nil { + if err := ValidateEmptyStringField(task.GetContainer().GetImage(), shared.Image); err != nil { return err } - if task.GetContainer().Resources == nil { + if task.GetContainer().GetResources() == nil { return nil } - if err := validateTaskResources(task.Id, platformTaskResources.Limits, task.GetContainer().Resources.Requests, - task.GetContainer().Resources.Limits); err != nil { + if err := validateTaskResources(task.GetId(), platformTaskResources.Limits, task.GetContainer().GetResources().GetRequests(), + task.GetContainer().GetResources().GetLimits()); err != nil { logger.Debugf(context.Background(), "encountered errors validating task resources for [%+v]: %v", - task.Id, err) + task.GetId(), err) return err } return nil @@ -44,23 +44,23 @@ func validateContainer(task *core.TaskTemplate, platformTaskResources workflowen // This is called for a task with a non-nil k8s pod. func validateK8sPod(task *core.TaskTemplate, platformTaskResources workflowengineInterfaces.TaskResources) error { - if task.GetK8SPod().PodSpec == nil { + if task.GetK8SPod().GetPodSpec() == nil { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid TaskSpecification, pod tasks should specify their target as a K8sPod with a defined pod spec") } var podSpec corev1.PodSpec - if err := utils.UnmarshalStructToObj(task.GetK8SPod().PodSpec, &podSpec); err != nil { + if err := utils.UnmarshalStructToObj(task.GetK8SPod().GetPodSpec(), &podSpec); err != nil { logger.Debugf(context.Background(), "failed to unmarshal k8s podspec [%+v]: %v", - task.GetK8SPod().PodSpec, err) + task.GetK8SPod().GetPodSpec(), err) return err } platformTaskResourceLimits := taskResourceSetToMap(platformTaskResources.Limits) for _, container := range podSpec.Containers { - err := validateResource(task.Id, resourceListToQuantity(container.Resources.Requests), + err := validateResource(task.GetId(), resourceListToQuantity(container.Resources.Requests), resourceListToQuantity(container.Resources.Limits), platformTaskResourceLimits) if err != nil { logger.Debugf(context.Background(), "encountered errors validating task resources for [%+v]: %v", - task.Id, err) + task.GetId(), err) return err } } @@ -69,7 +69,7 @@ func validateK8sPod(task *core.TaskTemplate, platformTaskResources workflowengin } func validateRuntimeMetadata(metadata *core.RuntimeMetadata) error { - if err := ValidateEmptyStringField(metadata.Version, shared.RuntimeVersion); err != nil { + if err := ValidateEmptyStringField(metadata.GetVersion(), shared.RuntimeVersion); err != nil { return err } return nil @@ -78,21 +78,21 @@ func validateRuntimeMetadata(metadata *core.RuntimeMetadata) error { func 
validateTaskTemplate(taskID *core.Identifier, task *core.TaskTemplate, platformTaskResources workflowengineInterfaces.TaskResources, whitelistConfig runtime.WhitelistConfiguration) error { - if err := ValidateEmptyStringField(task.Type, shared.Type); err != nil { + if err := ValidateEmptyStringField(task.GetType(), shared.Type); err != nil { return err } - if err := validateTaskType(taskID, task.Type, whitelistConfig); err != nil { + if err := validateTaskType(taskID, task.GetType(), whitelistConfig); err != nil { return err } - if task.Metadata == nil { + if task.GetMetadata() == nil { return shared.GetMissingArgumentError(shared.Metadata) } - if task.Metadata.Runtime != nil { - if err := validateRuntimeMetadata(task.Metadata.Runtime); err != nil { + if task.GetMetadata().GetRuntime() != nil { + if err := validateRuntimeMetadata(task.GetMetadata().GetRuntime()); err != nil { return err } } - if task.Interface == nil { + if task.GetInterface() == nil { // The actual interface proto has nothing to validate. return shared.GetMissingArgumentError(shared.TypedInterface) } @@ -110,16 +110,16 @@ func ValidateTask( ctx context.Context, request *admin.TaskCreateRequest, db repositoryInterfaces.Repository, platformTaskResources workflowengineInterfaces.TaskResources, whitelistConfig runtime.WhitelistConfiguration, applicationConfig runtime.ApplicationConfiguration) error { - if err := ValidateIdentifier(request.Id, common.Task); err != nil { + if err := ValidateIdentifier(request.GetId(), common.Task); err != nil { return err } - if err := ValidateProjectAndDomain(ctx, db, applicationConfig, request.Id.Project, request.Id.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, applicationConfig, request.GetId().GetProject(), request.GetId().GetDomain()); err != nil { return err } - if request.Spec == nil || request.Spec.Template == nil { + if request.GetSpec() == nil || request.GetSpec().GetTemplate() == nil { return shared.GetMissingArgumentError(shared.Spec) } - return validateTaskTemplate(request.Id, request.Spec.Template, platformTaskResources, whitelistConfig) + return validateTaskTemplate(request.GetId(), request.GetSpec().GetTemplate(), platformTaskResources, whitelistConfig) } func taskResourceSetToMap( @@ -143,18 +143,18 @@ func taskResourceSetToMap( func addResourceEntryToMap( identifier *core.Identifier, entry *core.Resources_ResourceEntry, resourceEntries *map[core.Resources_ResourceName]resource.Quantity) error { - if _, ok := (*resourceEntries)[entry.Name]; ok { + if _, ok := (*resourceEntries)[entry.GetName()]; ok { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "can't specify %v limit for task [%+v] multiple times", entry.Name, identifier) + "can't specify %v limit for task [%+v] multiple times", entry.GetName(), identifier) } - quantity, err := resource.ParseQuantity(entry.Value) + quantity, err := resource.ParseQuantity(entry.GetValue()) if err != nil { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "Parsing of %v request failed for value %v - reason %v. 
"+ "Please follow K8s conventions for resources "+ - "https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", entry.Name, entry.Value, err) + "https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", entry.GetName(), entry.GetValue(), err) } - (*resourceEntries)[entry.Name] = quantity + (*resourceEntries)[entry.GetName()] = quantity return nil } @@ -184,7 +184,7 @@ func requestedResourcesToQuantity( var requestedToQuantity = make(map[core.Resources_ResourceName]resource.Quantity) for _, limitEntry := range resources { - switch limitEntry.Name { + switch limitEntry.GetName() { case core.Resources_CPU: fallthrough case core.Resources_MEMORY: @@ -199,7 +199,7 @@ func requestedResourcesToQuantity( } if !isWholeNumber(requestedToQuantity[core.Resources_GPU]) { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "gpu for [%+v] must be a whole number, got: %s instead", identifier, limitEntry.Value) + "gpu for [%+v] must be a whole number, got: %s instead", identifier, limitEntry.GetValue()) } case core.Resources_EPHEMERAL_STORAGE: err := addResourceEntryToMap(identifier, limitEntry, &requestedToQuantity) @@ -252,15 +252,14 @@ func validateResource(identifier *core.Identifier, requestedResourceDefaults, if ok && platformLimitOk && limitQuantity.Value() > platformLimit.Value() { // Also check that the requested limit is less than the platform task limit. return errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "Requested %v limit [%v] is greater than current limit set in the platform configuration"+ - " [%v]. Please contact Flyte Admins to change these limits or consult the configuration", + "Requested %v limit [%v] is greater than current limit set in the platform configuration [%v]. Please contact Flyte Admins to change these limits or consult the configuration", resourceName, limitQuantity.String(), platformLimit.String()) } if platformLimitOk && defaultQuantity.Value() > platformTaskResourceLimits[resourceName].Value() { // Also check that the requested limit is less than the platform task limit. return errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "Requested %v default [%v] is greater than current limit set in the platform configuration"+ - " [%v]. Please contact Flyte Admins to change these limits or consult the configuration", + "Requested %v default [%v] is greater than current limit set in the platform configuration [%v]. Please contact Flyte Admins to change these limits or consult the configuration", + resourceName, defaultQuantity.String(), platformTaskResourceLimits[resourceName].String()) } case core.Resources_GPU: @@ -273,8 +272,7 @@ func validateResource(identifier *core.Identifier, requestedResourceDefaults, platformLimit, platformLimitOk := platformTaskResourceLimits[resourceName] if platformLimitOk && defaultQuantity.Value() > platformLimit.Value() { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "Requested %v default [%v] is greater than current limit set in the platform configuration"+ - " [%v]. Please contact Flyte Admins to change these limits or consult the configuration", + "Requested %v default [%v] is greater than current limit set in the platform configuration [%v]. 
Please contact Flyte Admins to change these limits or consult the configuration", resourceName, defaultQuantity.String(), platformLimit.String()) } } @@ -295,14 +293,14 @@ func validateTaskType(taskID *core.Identifier, taskType string, whitelistConfig if scope.Project == "" { // All projects whitelisted return nil - } else if scope.Project != taskID.Project { + } else if scope.Project != taskID.GetProject() { continue } // We have a potential match! Verify that this task type is approved given the specificity of the whitelist. if scope.Domain == "" { // All domains for this project are whitelisted return nil - } else if scope.Domain == taskID.Domain { + } else if scope.Domain == taskID.GetDomain() { return nil } diff --git a/flyteadmin/pkg/manager/impl/validation/validation.go b/flyteadmin/pkg/manager/impl/validation/validation.go index de2927495c..03bc8f963d 100644 --- a/flyteadmin/pkg/manager/impl/validation/validation.go +++ b/flyteadmin/pkg/manager/impl/validation/validation.go @@ -50,10 +50,10 @@ func ValidateMaxMapLengthField(m map[string]string, fieldName string, limit int) } func validateLabels(labels *admin.Labels) error { - if labels == nil || len(labels.Values) == 0 { + if labels == nil || len(labels.GetValues()) == 0 { return nil } - if err := ValidateMaxMapLengthField(labels.Values, "labels", maxLabelArrayLength); err != nil { + if err := ValidateMaxMapLengthField(labels.GetValues(), "labels", maxLabelArrayLength); err != nil { return err } if err := validateLabelsAlphanumeric(labels); err != nil { @@ -65,7 +65,7 @@ func validateLabels(labels *admin.Labels) error { // Given an admin.Labels, checks if the labels exist or not and if it does, checks if the labels are K8s compliant, // i.e. alphanumeric + - and _ func validateLabelsAlphanumeric(labels *admin.Labels) error { - for key, value := range labels.Values { + for key, value := range labels.GetValues() { if errs := validation.IsQualifiedName(key); len(errs) > 0 { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid label key [%s]: %v", key, errs) } @@ -80,16 +80,16 @@ func ValidateIdentifierFieldsSet(id *core.Identifier) error { if id == nil { return shared.GetMissingArgumentError(shared.ID) } - if err := ValidateEmptyStringField(id.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(id.GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(id.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(id.GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateEmptyStringField(id.Name, shared.Name); err != nil { + if err := ValidateEmptyStringField(id.GetName(), shared.Name); err != nil { return err } - if err := ValidateEmptyStringField(id.Version, shared.Version); err != nil { + if err := ValidateEmptyStringField(id.GetVersion(), shared.Version); err != nil { return err } return nil @@ -100,10 +100,10 @@ func ValidateIdentifier(id *core.Identifier, expectedType common.Entity) error { if id == nil { return shared.GetMissingArgumentError(shared.ID) } - if entityToResourceType[expectedType] != id.ResourceType { + if entityToResourceType[expectedType] != id.GetResourceType() { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "unexpected resource type %s for identifier [%+v], expected %s instead", - strings.ToLower(id.ResourceType.String()), id, strings.ToLower(entityToResourceType[expectedType].String())) + strings.ToLower(id.GetResourceType().String()), id, 
strings.ToLower(entityToResourceType[expectedType].String())) } return ValidateIdentifierFieldsSet(id) } @@ -113,13 +113,13 @@ func ValidateNamedEntityIdentifier(id *admin.NamedEntityIdentifier) error { if id == nil { return shared.GetMissingArgumentError(shared.ID) } - if err := ValidateEmptyStringField(id.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(id.GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(id.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(id.GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateEmptyStringField(id.Name, shared.Name); err != nil { + if err := ValidateEmptyStringField(id.GetName(), shared.Name); err != nil { return err } return nil @@ -144,92 +144,92 @@ func ValidateVersion(version string) error { } func ValidateResourceListRequest(request *admin.ResourceListRequest) error { - if request.Id == nil { + if request.GetId() == nil { return shared.GetMissingArgumentError(shared.ID) } - if err := ValidateEmptyStringField(request.Id.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(request.GetId().GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(request.Id.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(request.GetId().GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil } func ValidateDescriptionEntityListRequest(request *admin.DescriptionEntityListRequest) error { - if request.Id == nil { + if request.GetId() == nil { return shared.GetMissingArgumentError(shared.ID) } - if err := ValidateEmptyStringField(request.Id.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(request.GetId().GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(request.Id.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(request.GetId().GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateEmptyStringField(request.Id.Name, shared.Name); err != nil { + if err := ValidateEmptyStringField(request.GetId().GetName(), shared.Name); err != nil { return err } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil } func ValidateActiveLaunchPlanRequest(request *admin.ActiveLaunchPlanRequest) error { - if err := ValidateEmptyStringField(request.Id.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(request.GetId().GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(request.Id.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(request.GetId().GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateEmptyStringField(request.Id.Name, shared.Name); err != nil { + if err := ValidateEmptyStringField(request.GetId().GetName(), shared.Name); err != nil { return err } return nil } func ValidateActiveLaunchPlanListRequest(request *admin.ActiveLaunchPlanListRequest) error { - if err := ValidateEmptyStringField(request.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(request.GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(request.Domain, shared.Domain); err != nil { + if err := 
ValidateEmptyStringField(request.GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil } func ValidateNamedEntityIdentifierListRequest(request *admin.NamedEntityIdentifierListRequest) error { - if err := ValidateEmptyStringField(request.Project, shared.Project); err != nil { + if err := ValidateEmptyStringField(request.GetProject(), shared.Project); err != nil { return err } - if err := ValidateEmptyStringField(request.Domain, shared.Domain); err != nil { + if err := ValidateEmptyStringField(request.GetDomain(), shared.Domain); err != nil { return err } - if err := ValidateLimit(request.Limit); err != nil { + if err := ValidateLimit(request.GetLimit()); err != nil { return err } return nil } func ValidateDescriptionEntityGetRequest(request *admin.ObjectGetRequest) error { - if err := ValidateResourceType(request.Id.ResourceType); err != nil { + if err := ValidateResourceType(request.GetId().GetResourceType()); err != nil { return err } - if err := ValidateIdentifierFieldsSet(request.Id); err != nil { + if err := ValidateIdentifierFieldsSet(request.GetId()); err != nil { return err } return nil } func validateLiteralMap(inputMap *core.LiteralMap, fieldName string) error { - if inputMap != nil && len(inputMap.Literals) > 0 { - for name, fixedInput := range inputMap.Literals { + if inputMap != nil && len(inputMap.GetLiterals()) > 0 { + for name, fixedInput := range inputMap.GetLiterals() { if name == "" { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "missing key in %s", fieldName) } @@ -251,8 +251,8 @@ func validateParameterMapAllowArtifacts(inputMap *core.ParameterMap, fieldName s } func validateParameterMapDisableArtifacts(inputMap *core.ParameterMap, fieldName string) error { - if inputMap != nil && len(inputMap.Parameters) > 0 { - for name, defaultInput := range inputMap.Parameters { + if inputMap != nil && len(inputMap.GetParameters()) > 0 { + for name, defaultInput := range inputMap.GetParameters() { if defaultInput.GetArtifactQuery() != nil { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "artifact mode not enabled but query found %s %s", fieldName, name) } @@ -262,8 +262,8 @@ func validateParameterMapDisableArtifacts(inputMap *core.ParameterMap, fieldName } func validateParameterMap(inputMap *core.ParameterMap, fieldName string) error { - if inputMap != nil && len(inputMap.Parameters) > 0 { - for name, defaultInput := range inputMap.Parameters { + if inputMap != nil && len(inputMap.GetParameters()) > 0 { + for name, defaultInput := range inputMap.GetParameters() { if name == "" { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "missing key in %s", fieldName) } @@ -347,7 +347,7 @@ func ValidateDatetime(literal *core.Literal) error { err := timestamp.CheckValid() if err != nil { - return errors.NewFlyteAdminErrorf(codes.InvalidArgument, err.Error()) + return errors.NewFlyteAdminErrorf(codes.InvalidArgument, err.Error()) //nolint } return nil } diff --git a/flyteadmin/pkg/manager/impl/validation/workflow_validator.go b/flyteadmin/pkg/manager/impl/validation/workflow_validator.go index d5d2681375..7a5f36e78b 100644 --- a/flyteadmin/pkg/manager/impl/validation/workflow_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/workflow_validator.go @@ -22,13 +22,13 @@ const numSystemNodes = 2 // A workflow graph always has a start and end node inj func ValidateWorkflow( ctx context.Context, request 
*admin.WorkflowCreateRequest, db repositoryInterfaces.Repository, config runtime.ApplicationConfiguration) error { - if err := ValidateIdentifier(request.Id, common.Workflow); err != nil { + if err := ValidateIdentifier(request.GetId(), common.Workflow); err != nil { return err } - if err := ValidateProjectAndDomain(ctx, db, config, request.Id.Project, request.Id.Domain); err != nil { + if err := ValidateProjectAndDomain(ctx, db, config, request.GetId().GetProject(), request.GetId().GetDomain()); err != nil { return err } - if request.Spec == nil || request.Spec.Template == nil { + if request.GetSpec() == nil || request.GetSpec().GetTemplate() == nil { return shared.GetMissingArgumentError(shared.Spec) } return nil @@ -47,12 +47,12 @@ func ValidateCompiledWorkflow(identifier *core.Identifier, workflow *admin.Workf // Treat this is unset. There is no limit to compare against. return nil } - if workflow.CompiledWorkflow == nil || workflow.CompiledWorkflow.Primary == nil || - workflow.CompiledWorkflow.Primary.Template == nil || workflow.CompiledWorkflow.Primary.Template.Nodes == nil { + if workflow.GetCompiledWorkflow() == nil || workflow.GetCompiledWorkflow().GetPrimary() == nil || + workflow.GetCompiledWorkflow().GetPrimary().GetTemplate() == nil || workflow.GetCompiledWorkflow().GetPrimary().GetTemplate().GetNodes() == nil { logger.Warningf(context.Background(), "workflow [%+v] did not have any primary nodes", identifier) return nil } - numUserNodes := len(workflow.CompiledWorkflow.Primary.Template.Nodes) - numSystemNodes + numUserNodes := len(workflow.GetCompiledWorkflow().GetPrimary().GetTemplate().GetNodes()) - numSystemNodes if numUserNodes > config.GetWorkflowNodeLimit() { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "number of nodes in workflow [%+v] exceeds limit (%v > %v)", identifier, diff --git a/flyteadmin/pkg/manager/impl/version_manager_test.go b/flyteadmin/pkg/manager/impl/version_manager_test.go index 5cea4a0b15..7b5b5c9bac 100644 --- a/flyteadmin/pkg/manager/impl/version_manager_test.go +++ b/flyteadmin/pkg/manager/impl/version_manager_test.go @@ -24,7 +24,7 @@ func TestVersionManager_GetVersion(t *testing.T) { v, err := vmanager.GetVersion(context.Background(), &admin.GetVersionRequest{}) assert.Nil(t, err) - assert.Equal(t, v.ControlPlaneVersion.BuildTime, buildTime) - assert.Equal(t, v.ControlPlaneVersion.Build, build) - assert.Equal(t, v.ControlPlaneVersion.Version, appversion) + assert.Equal(t, v.GetControlPlaneVersion().GetBuildTime(), buildTime) + assert.Equal(t, v.GetControlPlaneVersion().GetBuild(), build) + assert.Equal(t, v.GetControlPlaneVersion().GetVersion(), appversion) } diff --git a/flyteadmin/pkg/manager/impl/workflow_manager.go b/flyteadmin/pkg/manager/impl/workflow_manager.go index d3bfdc67dd..b99de8773f 100644 --- a/flyteadmin/pkg/manager/impl/workflow_manager.go +++ b/flyteadmin/pkg/manager/impl/workflow_manager.go @@ -48,26 +48,26 @@ type WorkflowManager struct { } func getWorkflowContext(ctx context.Context, identifier *core.Identifier) context.Context { - ctx = contextutils.WithProjectDomain(ctx, identifier.Project, identifier.Domain) - return contextutils.WithWorkflowID(ctx, identifier.Name) + ctx = contextutils.WithProjectDomain(ctx, identifier.GetProject(), identifier.GetDomain()) + return contextutils.WithWorkflowID(ctx, identifier.GetName()) } func (w *WorkflowManager) setDefaults(request *admin.WorkflowCreateRequest) (*admin.WorkflowCreateRequest, error) { // TODO: Also add environment and configuration defaults once those have 
been determined. - if request.Id == nil { + if request.GetId() == nil { return request, errors.NewFlyteAdminError(codes.InvalidArgument, "missing identifier for WorkflowCreateRequest") } - request.Spec.Template.Id = request.Id + request.Spec.Template.Id = request.GetId() return request, nil } func (w *WorkflowManager) getCompiledWorkflow( ctx context.Context, request *admin.WorkflowCreateRequest) (*admin.WorkflowClosure, error) { - reqs, err := w.compiler.GetRequirements(request.Spec.Template, request.Spec.SubWorkflows) + reqs, err := w.compiler.GetRequirements(request.GetSpec().GetTemplate(), request.GetSpec().GetSubWorkflows()) if err != nil { w.metrics.CompilationFailures.Inc() logger.Errorf(ctx, "Failed to get workflow requirements for template [%+v] with err %v", - request.Spec.Template, err) + request.GetSpec().GetTemplate(), err) return &admin.WorkflowClosure{}, err } @@ -76,10 +76,10 @@ func (w *WorkflowManager) getCompiledWorkflow( task, err := util.GetTask(ctx, w.db, taskID) if err != nil { logger.Debugf(ctx, "Failed to get task with id [%+v] when compiling workflow with id [%+v] with err %v", - taskID, request.Id, err) + taskID, request.GetId(), err) return &admin.WorkflowClosure{}, err } - tasks[idx] = task.Closure.CompiledTask + tasks[idx] = task.GetClosure().GetCompiledTask() } var launchPlans = make([]compiler.InterfaceProvider, len(reqs.GetRequiredLaunchPlanIds())) @@ -88,7 +88,7 @@ func (w *WorkflowManager) getCompiledWorkflow( launchPlanModel, err = util.GetLaunchPlanModel(ctx, w.db, launchPlanID) if err != nil { logger.Debugf(ctx, "Failed to get launch plan with id [%+v] when compiling workflow with id [%+v] with err %v", - launchPlanID, request.Id, err) + launchPlanID, request.GetId(), err) return &admin.WorkflowClosure{}, err } var launchPlanInterfaceProvider workflowengine.InterfaceProvider @@ -101,16 +101,16 @@ func (w *WorkflowManager) getCompiledWorkflow( launchPlans[idx] = launchPlanInterfaceProvider } - closure, err := w.compiler.CompileWorkflow(request.Spec.Template, request.Spec.SubWorkflows, tasks, launchPlans) + closure, err := w.compiler.CompileWorkflow(request.GetSpec().GetTemplate(), request.GetSpec().GetSubWorkflows(), tasks, launchPlans) if err != nil { w.metrics.CompilationFailures.Inc() - logger.Debugf(ctx, "Failed to compile workflow with id [%+v] with err %v", request.Id, err) + logger.Debugf(ctx, "Failed to compile workflow with id [%+v] with err %v", request.GetId(), err) return &admin.WorkflowClosure{}, err } createdAt, err := ptypes.TimestampProto(time.Now()) if err != nil { return &admin.WorkflowClosure{}, errors.NewFlyteAdminErrorf(codes.Internal, - "Failed to serialize CreatedAt: %v when saving compiled workflow %+v", err, request.Id) + "Failed to serialize CreatedAt: %v when saving compiled workflow %+v", err, request.GetId()) } return &admin.WorkflowClosure{ CompiledWorkflow: closure, @@ -121,10 +121,10 @@ func (w *WorkflowManager) getCompiledWorkflow( func (w *WorkflowManager) createDataReference( ctx context.Context, identifier *core.Identifier) (storage.DataReference, error) { nestedSubKeys := []string{ - identifier.Project, - identifier.Domain, - identifier.Name, - identifier.Version, + identifier.GetProject(), + identifier.GetDomain(), + identifier.GetName(), + identifier.GetVersion(), } nestedKeys := append(w.storagePrefix, nestedSubKeys...) return w.storageClient.ConstructReference(ctx, w.storageClient.GetBaseContainerFQN(ctx), nestedKeys...) 
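
The accessor changes that dominate this part of the diff (request.Id → request.GetId(), task.Metadata.Runtime → task.GetMetadata().GetRuntime(), and so on) lean on the Get* methods generated for the proto messages, which check for a nil receiver and return the zero value instead of panicking. A minimal sketch of why that matters, using hypothetical stand-in types rather than the real flyteidl/admin messages:

package main

import "fmt"

// Hypothetical stand-ins for a request message and a nested field; the real
// generated flyteidl types follow the same nil-receiver getter pattern.
type Metadata struct {
	State int32
}

type UpdateRequest struct {
	Metadata *Metadata
}

// Getters in the generated style: a nil receiver yields the zero value, so
// callers can chain them without an explicit nil check at every hop.
func (r *UpdateRequest) GetMetadata() *Metadata {
	if r == nil {
		return nil
	}
	return r.Metadata
}

func (m *Metadata) GetState() int32 {
	if m == nil {
		return 0
	}
	return m.State
}

func main() {
	req := &UpdateRequest{} // Metadata left unset, as a validator may receive it

	// Safe: the chain short-circuits to the zero value.
	fmt.Println(req.GetMetadata().GetState()) // prints 0

	// The pre-change form, req.Metadata.State, would dereference a nil pointer
	// and panic here, which is what the getter rewrite guards against.
}

A zero value returned by a getter is indistinguishable from an explicitly set zero, which is why the validators above still test GetId() == nil, GetNodeId() == "" and similar before relying on the results.
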
@@ -136,10 +136,10 @@ func (w *WorkflowManager) CreateWorkflow( if err := validation.ValidateWorkflow(ctx, request, w.db, w.config.ApplicationConfiguration()); err != nil { return nil, err } - ctx = getWorkflowContext(ctx, request.Id) + ctx = getWorkflowContext(ctx, request.GetId()) finalizedRequest, err := w.setDefaults(request) if err != nil { - logger.Debugf(ctx, "Failed to set defaults for workflow with id [%+v] with err %v", request.Id, err) + logger.Debugf(ctx, "Failed to set defaults for workflow with id [%+v] with err %v", request.GetId(), err) return nil, err } // Validate that the workflow compiles. @@ -147,21 +147,21 @@ func (w *WorkflowManager) CreateWorkflow( if err != nil { logger.Errorf(ctx, "Failed to compile workflow with err: %v", err) return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "failed to compile workflow for [%+v] with err: %v", request.Id, err) + "failed to compile workflow for [%+v] with err: %v", request.GetId(), err) } err = validation.ValidateCompiledWorkflow( - request.Id, workflowClosure, w.config.RegistrationValidationConfiguration()) + request.GetId(), workflowClosure, w.config.RegistrationValidationConfiguration()) if err != nil { return nil, err } - workflowDigest, err := util.GetWorkflowDigest(ctx, workflowClosure.CompiledWorkflow) + workflowDigest, err := util.GetWorkflowDigest(ctx, workflowClosure.GetCompiledWorkflow()) if err != nil { logger.Errorf(ctx, "failed to compute workflow digest with err %v", err) return nil, err } // Assert that a matching workflow doesn't already exist before uploading the workflow closure. - existingWorkflowModel, err := util.GetWorkflowModel(ctx, w.db, request.Id) + existingWorkflowModel, err := util.GetWorkflowModel(ctx, w.db, request.GetId()) // Check that no identical or conflicting workflows exist. if err == nil { // A workflow's structure is uniquely defined by its collection of nodes. 
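
For the deeply nested messages, such as the compiled workflow closure checked in ValidateCompiledWorkflow or the dynamic workflow metadata in the node-execution event validator, the same property lets long accessor chains run even when intermediate messages are unset. Another small sketch, again with hypothetical stand-in types that only mirror the nesting, not the real generated code:

package main

import "fmt"

// Hypothetical stand-ins mirroring the nesting used in the workflow validators
// (closure -> compiled workflow -> primary -> template -> nodes).
type Template struct{ Nodes []string }
type Primary struct{ Template *Template }
type CompiledWorkflowClosure struct{ Primary *Primary }
type WorkflowClosure struct{ CompiledWorkflow *CompiledWorkflowClosure }

func (t *Template) GetNodes() []string {
	if t == nil {
		return nil
	}
	return t.Nodes
}

func (p *Primary) GetTemplate() *Template {
	if p == nil {
		return nil
	}
	return p.Template
}

func (c *CompiledWorkflowClosure) GetPrimary() *Primary {
	if c == nil {
		return nil
	}
	return c.Primary
}

func (w *WorkflowClosure) GetCompiledWorkflow() *CompiledWorkflowClosure {
	if w == nil {
		return nil
	}
	return w.CompiledWorkflow
}

func main() {
	closure := &WorkflowClosure{} // nothing populated below the top level

	// Every hop tolerates a nil receiver, so the whole expression yields nil
	// instead of panicking part-way through the chain.
	nodes := closure.GetCompiledWorkflow().GetPrimary().GetTemplate().GetNodes()
	fmt.Println(len(nodes)) // prints 0
}

The validators in the diff still spell out per-level nil checks where they want distinct error messages (missing compiled workflow, missing primary, missing template); the chain above only illustrates that a partially populated message no longer panics on access.
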
@@ -174,29 +174,29 @@ func (w *WorkflowManager) CreateWorkflow( return nil, transformerErr } // A workflow exists with different structure - return nil, errors.NewWorkflowExistsDifferentStructureError(ctx, request, existingWorkflow.Closure.GetCompiledWorkflow(), workflowClosure.GetCompiledWorkflow()) + return nil, errors.NewWorkflowExistsDifferentStructureError(ctx, request, existingWorkflow.GetClosure().GetCompiledWorkflow(), workflowClosure.GetCompiledWorkflow()) } else if flyteAdminError, ok := err.(errors.FlyteAdminError); !ok || flyteAdminError.Code() != codes.NotFound { logger.Debugf(ctx, "Failed to get workflow for comparison in CreateWorkflow with ID [%+v] with err %v", - request.Id, err) + request.GetId(), err) return nil, err } - remoteClosureDataRef, err := w.createDataReference(ctx, request.Spec.Template.Id) + remoteClosureDataRef, err := w.createDataReference(ctx, request.GetSpec().GetTemplate().GetId()) if err != nil { logger.Infof(ctx, "failed to construct data reference for workflow closure with id [%+v] with err %v", - request.Id, err) + request.GetId(), err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, - "failed to construct data reference for workflow closure with id [%+v] and err %v", request.Id, err) + "failed to construct data reference for workflow closure with id [%+v] and err %v", request.GetId(), err) } err = w.storageClient.WriteProtobuf(ctx, remoteClosureDataRef, defaultStorageOptions, workflowClosure) if err != nil { logger.Infof(ctx, "failed to write marshaled workflow with id [%+v] to storage %s with err %v and base container: %s", - request.Id, remoteClosureDataRef.String(), err, w.storageClient.GetBaseContainerFQN(ctx)) + request.GetId(), remoteClosureDataRef.String(), err, w.storageClient.GetBaseContainerFQN(ctx)) return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to write marshaled workflow [%+v] to storage %s with err %v and base container: %s", - request.Id, remoteClosureDataRef.String(), err, w.storageClient.GetBaseContainerFQN(ctx)) + request.GetId(), remoteClosureDataRef.String(), err, w.storageClient.GetBaseContainerFQN(ctx)) } // Save the workflow & its reference to the offloaded, compiled workflow in the database. 
workflowModel, err := transformers.CreateWorkflowModel( @@ -207,17 +207,17 @@ func (w *WorkflowManager) CreateWorkflow( finalizedRequest, remoteClosureDataRef.String(), err) return nil, err } - descriptionModel, err := transformers.CreateDescriptionEntityModel(request.Spec.Description, request.Id) + descriptionModel, err := transformers.CreateDescriptionEntityModel(request.GetSpec().GetDescription(), request.GetId()) if err != nil { logger.Errorf(ctx, - "Failed to transform description model [%+v] with err: %v", request.Spec.Description, err) + "Failed to transform description model [%+v] with err: %v", request.GetSpec().GetDescription(), err) return nil, err } if descriptionModel != nil { workflowModel.ShortDescription = descriptionModel.ShortDescription } if err = w.db.WorkflowRepo().Create(ctx, workflowModel, descriptionModel); err != nil { - logger.Infof(ctx, "Failed to create workflow model [%+v] with err %v", request.Id, err) + logger.Infof(ctx, "Failed to create workflow model [%+v] with err %v", request.GetId(), err) return nil, err } w.metrics.TypedInterfaceSizeBytes.Observe(float64(len(workflowModel.TypedInterface))) @@ -226,14 +226,14 @@ func (w *WorkflowManager) CreateWorkflow( } func (w *WorkflowManager) GetWorkflow(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Workflow, error) { - if err := validation.ValidateIdentifier(request.Id, common.Workflow); err != nil { - logger.Debugf(ctx, "invalid identifier [%+v]: %v", request.Id, err) + if err := validation.ValidateIdentifier(request.GetId(), common.Workflow); err != nil { + logger.Debugf(ctx, "invalid identifier [%+v]: %v", request.GetId(), err) return nil, err } - ctx = getWorkflowContext(ctx, request.Id) - workflow, err := util.GetWorkflow(ctx, w.db, w.storageClient, request.Id) + ctx = getWorkflowContext(ctx, request.GetId()) + workflow, err := util.GetWorkflow(ctx, w.db, w.storageClient, request.GetId()) if err != nil { - logger.Infof(ctx, "Failed to get workflow with id [%+v] with err %v", request.Id, err) + logger.Infof(ctx, "Failed to get workflow with id [%+v] with err %v", request.GetId(), err) return nil, err } return workflow, nil @@ -246,37 +246,37 @@ func (w *WorkflowManager) ListWorkflows( if err := validation.ValidateResourceListRequest(request); err != nil { return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) - ctx = contextutils.WithWorkflowID(ctx, request.Id.Name) + ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain()) + ctx = contextutils.WithWorkflowID(ctx, request.GetId().GetName()) filters, err := util.GetDbFilters(util.FilterSpec{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, - RequestFilters: request.Filters, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), + RequestFilters: request.GetFilters(), }, common.Workflow) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.WorkflowColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.WorkflowColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListWorkflows", request.Token) + "invalid pagination token %s for ListWorkflows", request.GetToken()) } 
listWorkflowsInput := repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, } output, err := w.db.WorkflowRepo().List(ctx, listWorkflowsInput) if err != nil { - logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.Id, err) + logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.GetId(), err) return nil, err } workflowList, err := transformers.FromWorkflowModels(output.Workflows) @@ -286,7 +286,7 @@ func (w *WorkflowManager) ListWorkflows( return nil, err } var token string - if len(output.Workflows) == int(request.Limit) { + if len(output.Workflows) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(output.Workflows)) } return &admin.WorkflowList{ @@ -301,28 +301,28 @@ func (w *WorkflowManager) ListWorkflowIdentifiers(ctx context.Context, request * logger.Debugf(ctx, "invalid request [%+v]: %v", request, err) return nil, err } - ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain) + ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain()) filters, err := util.GetDbFilters(util.FilterSpec{ - Project: request.Project, - Domain: request.Domain, + Project: request.GetProject(), + Domain: request.GetDomain(), }, common.Workflow) if err != nil { return nil, err } - sortParameter, err := common.NewSortParameter(request.SortBy, models.WorkflowColumns) + sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.WorkflowColumns) if err != nil { return nil, err } - offset, err := validation.ValidateToken(request.Token) + offset, err := validation.ValidateToken(request.GetToken()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "invalid pagination token %s for ListWorkflowIdentifiers", request.Token) + "invalid pagination token %s for ListWorkflowIdentifiers", request.GetToken()) } listWorkflowsInput := repoInterfaces.ListResourceInput{ - Limit: int(request.Limit), + Limit: int(request.GetLimit()), Offset: offset, InlineFilters: filters, SortParameter: sortParameter, @@ -331,12 +331,12 @@ func (w *WorkflowManager) ListWorkflowIdentifiers(ctx context.Context, request * output, err := w.db.WorkflowRepo().ListIdentifiers(ctx, listWorkflowsInput) if err != nil { logger.Debugf(ctx, "Failed to list workflow ids with project: %s and domain: %s with err %v", - request.Project, request.Domain, err) + request.GetProject(), request.GetDomain(), err) return nil, err } var token string - if len(output.Workflows) == int(request.Limit) { + if len(output.Workflows) == int(request.GetLimit()) { token = strconv.Itoa(offset + len(output.Workflows)) } entities := transformers.FromWorkflowModelsToIdentifiers(output.Workflows) diff --git a/flyteadmin/pkg/manager/impl/workflow_manager_test.go b/flyteadmin/pkg/manager/impl/workflow_manager_test.go index 8072453bbd..280624a956 100644 --- a/flyteadmin/pkg/manager/impl/workflow_manager_test.go +++ b/flyteadmin/pkg/manager/impl/workflow_manager_test.go @@ -128,7 +128,7 @@ func TestSetWorkflowDefaults(t *testing.T) { request := testutils.GetWorkflowRequest() finalizedRequest, err := workflowManager.(*WorkflowManager).setDefaults(request) assert.NoError(t, err) - assert.True(t, proto.Equal(workflowIdentifier, finalizedRequest.Spec.Template.Id)) + assert.True(t, proto.Equal(workflowIdentifier, finalizedRequest.GetSpec().GetTemplate().GetId())) } func TestCreateWorkflow(t *testing.T) { @@ -309,12 +309,12 @@ func 
TestGetWorkflow(t *testing.T) { Id: workflowIdentifier, }) assert.NoError(t, err) - assert.Equal(t, "project", workflow.Id.Project) - assert.Equal(t, "domain", workflow.Id.Domain) - assert.Equal(t, "name", workflow.Id.Name) - assert.Equal(t, "version", workflow.Id.Version) - assert.True(t, proto.Equal(testutils.GetWorkflowClosure(), workflow.Closure), - "%+v !=\n %+v", testutils.GetWorkflowClosure(), workflow.Closure) + assert.Equal(t, "project", workflow.GetId().GetProject()) + assert.Equal(t, "domain", workflow.GetId().GetDomain()) + assert.Equal(t, "name", workflow.GetId().GetName()) + assert.Equal(t, "version", workflow.GetId().GetVersion()) + assert.True(t, proto.Equal(testutils.GetWorkflowClosure(), workflow.GetClosure()), + "%+v !=\n %+v", testutils.GetWorkflowClosure(), workflow.GetClosure()) } func TestGetWorkflow_DatabaseError(t *testing.T) { @@ -450,13 +450,13 @@ func TestListWorkflows(t *testing.T) { }) assert.NoError(t, err) assert.NotNil(t, workflowList) - assert.Len(t, workflowList.Workflows, 2) + assert.Len(t, workflowList.GetWorkflows(), 2) - for idx, workflow := range workflowList.Workflows { - assert.Equal(t, projectValue, workflow.Id.Project) - assert.Equal(t, domainValue, workflow.Id.Domain) - assert.Equal(t, nameValue, workflow.Id.Name) - assert.Equal(t, fmt.Sprintf("version %v", idx), workflow.Id.Version) + for idx, workflow := range workflowList.GetWorkflows() { + assert.Equal(t, projectValue, workflow.GetId().GetProject()) + assert.Equal(t, domainValue, workflow.GetId().GetDomain()) + assert.Equal(t, nameValue, workflow.GetId().GetName()) + assert.Equal(t, fmt.Sprintf("version %v", idx), workflow.GetId().GetVersion()) assert.True(t, proto.Equal(&admin.WorkflowClosure{ CreatedAt: testutils.MockCreatedAtProto, CompiledWorkflow: &core.CompiledWorkflowClosure{ @@ -466,9 +466,9 @@ func TestListWorkflows(t *testing.T) { }, }, }, - }, workflow.Closure)) + }, workflow.GetClosure())) } - assert.Empty(t, workflowList.Token) + assert.Empty(t, workflowList.GetToken()) } func TestListWorkflows_MissingParameters(t *testing.T) { @@ -584,11 +584,11 @@ func TestWorkflowManager_ListWorkflowIdentifiers(t *testing.T) { }) assert.NoError(t, err) assert.NotNil(t, workflowList) - assert.Len(t, workflowList.Entities, 2) + assert.Len(t, workflowList.GetEntities(), 2) - for _, entity := range workflowList.Entities { - assert.Equal(t, projectValue, entity.Project) - assert.Equal(t, domainValue, entity.Domain) - assert.Equal(t, nameValue, entity.Name) + for _, entity := range workflowList.GetEntities() { + assert.Equal(t, projectValue, entity.GetProject()) + assert.Equal(t, domainValue, entity.GetDomain()) + assert.Equal(t, nameValue, entity.GetName()) } } diff --git a/flyteadmin/pkg/repositories/gormimpl/common.go b/flyteadmin/pkg/repositories/gormimpl/common.go index b103ef0e43..7f4d4f370a 100644 --- a/flyteadmin/pkg/repositories/gormimpl/common.go +++ b/flyteadmin/pkg/repositories/gormimpl/common.go @@ -52,17 +52,14 @@ var entityToTableName = map[common.Entity]string{ } var innerJoinExecToNodeExec = fmt.Sprintf( - "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND "+ - "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", + "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND %[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", executionTableName, nodeExecutionTableName) var innerJoinExecToTaskExec = fmt.Sprintf( - "INNER JOIN %[1]s ON 
%[2]s.execution_project = %[1]s.execution_project AND "+ - "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", + "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND %[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", executionTableName, taskExecutionTableName) var innerJoinNodeExecToTaskExec = fmt.Sprintf( - "INNER JOIN %[1]s ON %s.node_id = %[1]s.node_id AND %[2]s.execution_project = %[1]s.execution_project AND "+ - "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", + "INNER JOIN %[1]s ON %s.node_id = %[1]s.node_id AND %[2]s.execution_project = %[1]s.execution_project AND %[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", nodeExecutionTableName, taskExecutionTableName) // Because dynamic tasks do NOT necessarily register static task definitions, we use a left join to not exclude diff --git a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go index b1772862dc..f39c6df554 100644 --- a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go +++ b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go @@ -36,11 +36,11 @@ func (r *NodeExecutionRepo) Get(ctx context.Context, input interfaces.NodeExecut timer := r.metrics.GetDuration.Start() tx := r.db.WithContext(ctx).Where(&models.NodeExecution{ NodeExecutionKey: models.NodeExecutionKey{ - NodeID: input.NodeExecutionIdentifier.NodeId, + NodeID: input.NodeExecutionIdentifier.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: input.NodeExecutionIdentifier.ExecutionId.Project, - Domain: input.NodeExecutionIdentifier.ExecutionId.Domain, - Name: input.NodeExecutionIdentifier.ExecutionId.Name, + Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(), + Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(), + Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(), }, }, }).Take(&nodeExecution) @@ -49,11 +49,11 @@ func (r *NodeExecutionRepo) Get(ctx context.Context, input interfaces.NodeExecut if tx.Error != nil && errors.Is(tx.Error, gorm.ErrRecordNotFound) { return models.NodeExecution{}, adminErrors.GetMissingEntityError("node execution", &core.NodeExecutionIdentifier{ - NodeId: input.NodeExecutionIdentifier.NodeId, + NodeId: input.NodeExecutionIdentifier.GetNodeId(), ExecutionId: &core.WorkflowExecutionIdentifier{ - Project: input.NodeExecutionIdentifier.ExecutionId.Project, - Domain: input.NodeExecutionIdentifier.ExecutionId.Domain, - Name: input.NodeExecutionIdentifier.ExecutionId.Name, + Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(), + Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(), + Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(), }, }) } else if tx.Error != nil { @@ -68,11 +68,11 @@ func (r *NodeExecutionRepo) GetWithChildren(ctx context.Context, input interface timer := r.metrics.GetDuration.Start() tx := r.db.WithContext(ctx).Where(&models.NodeExecution{ NodeExecutionKey: models.NodeExecutionKey{ - NodeID: input.NodeExecutionIdentifier.NodeId, + NodeID: input.NodeExecutionIdentifier.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: input.NodeExecutionIdentifier.ExecutionId.Project, - Domain: input.NodeExecutionIdentifier.ExecutionId.Domain, - Name: input.NodeExecutionIdentifier.ExecutionId.Name, + Project: 
input.NodeExecutionIdentifier.GetExecutionId().GetProject(), + Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(), + Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(), }, }, }).Preload("ChildNodeExecutions").Take(&nodeExecution) @@ -81,11 +81,11 @@ func (r *NodeExecutionRepo) GetWithChildren(ctx context.Context, input interface if tx.Error != nil && errors.Is(tx.Error, gorm.ErrRecordNotFound) { return models.NodeExecution{}, adminErrors.GetMissingEntityError("node execution", &core.NodeExecutionIdentifier{ - NodeId: input.NodeExecutionIdentifier.NodeId, + NodeId: input.NodeExecutionIdentifier.GetNodeId(), ExecutionId: &core.WorkflowExecutionIdentifier{ - Project: input.NodeExecutionIdentifier.ExecutionId.Project, - Domain: input.NodeExecutionIdentifier.ExecutionId.Domain, - Name: input.NodeExecutionIdentifier.ExecutionId.Name, + Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(), + Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(), + Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(), }, }) } else if tx.Error != nil { @@ -144,11 +144,11 @@ func (r *NodeExecutionRepo) Exists(ctx context.Context, input interfaces.NodeExe timer := r.metrics.ExistsDuration.Start() tx := r.db.WithContext(ctx).Select(ID).Where(&models.NodeExecution{ NodeExecutionKey: models.NodeExecutionKey{ - NodeID: input.NodeExecutionIdentifier.NodeId, + NodeID: input.NodeExecutionIdentifier.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: input.NodeExecutionIdentifier.ExecutionId.Project, - Domain: input.NodeExecutionIdentifier.ExecutionId.Domain, - Name: input.NodeExecutionIdentifier.ExecutionId.Name, + Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(), + Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(), + Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(), }, }, }).Take(&nodeExecution) diff --git a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go index d4d30bef85..ba473c2968 100644 --- a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go +++ b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go @@ -37,17 +37,17 @@ func (r *TaskExecutionRepo) Get(ctx context.Context, input interfaces.GetTaskExe tx := r.db.WithContext(ctx).Where(&models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: input.TaskExecutionID.TaskId.Project, - Domain: input.TaskExecutionID.TaskId.Domain, - Name: input.TaskExecutionID.TaskId.Name, - Version: input.TaskExecutionID.TaskId.Version, + Project: input.TaskExecutionID.GetTaskId().GetProject(), + Domain: input.TaskExecutionID.GetTaskId().GetDomain(), + Name: input.TaskExecutionID.GetTaskId().GetName(), + Version: input.TaskExecutionID.GetTaskId().GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: input.TaskExecutionID.NodeExecutionId.NodeId, + NodeID: input.TaskExecutionID.GetNodeExecutionId().GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: input.TaskExecutionID.NodeExecutionId.ExecutionId.Project, - Domain: input.TaskExecutionID.NodeExecutionId.ExecutionId.Domain, - Name: input.TaskExecutionID.NodeExecutionId.ExecutionId.Name, + Project: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject(), + Domain: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain(), + Name: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetName(), }, }, RetryAttempt: 
&input.TaskExecutionID.RetryAttempt, @@ -59,17 +59,17 @@ func (r *TaskExecutionRepo) Get(ctx context.Context, input interfaces.GetTaskExe return models.TaskExecution{}, flyteAdminDbErrors.GetMissingEntityError("task execution", &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ - Project: input.TaskExecutionID.TaskId.Project, - Domain: input.TaskExecutionID.TaskId.Domain, - Name: input.TaskExecutionID.TaskId.Name, - Version: input.TaskExecutionID.TaskId.Version, + Project: input.TaskExecutionID.GetTaskId().GetProject(), + Domain: input.TaskExecutionID.GetTaskId().GetDomain(), + Name: input.TaskExecutionID.GetTaskId().GetName(), + Version: input.TaskExecutionID.GetTaskId().GetVersion(), }, NodeExecutionId: &core.NodeExecutionIdentifier{ - NodeId: input.TaskExecutionID.NodeExecutionId.NodeId, + NodeId: input.TaskExecutionID.GetNodeExecutionId().GetNodeId(), ExecutionId: &core.WorkflowExecutionIdentifier{ - Project: input.TaskExecutionID.NodeExecutionId.ExecutionId.Project, - Domain: input.TaskExecutionID.NodeExecutionId.ExecutionId.Domain, - Name: input.TaskExecutionID.NodeExecutionId.ExecutionId.Name, + Project: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject(), + Domain: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain(), + Name: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetName(), }, }, }) diff --git a/flyteadmin/pkg/repositories/transformers/description_entity.go b/flyteadmin/pkg/repositories/transformers/description_entity.go index d3a816179b..8e16ef0c6d 100644 --- a/flyteadmin/pkg/repositories/transformers/description_entity.go +++ b/flyteadmin/pkg/repositories/transformers/description_entity.go @@ -26,34 +26,34 @@ func CreateDescriptionEntityModel( var sourceCode models.SourceCode var err error - if descriptionEntity.LongDescription != nil { - longDescriptionBytes, err = proto.Marshal(descriptionEntity.LongDescription) + if descriptionEntity.GetLongDescription() != nil { + longDescriptionBytes, err = proto.Marshal(descriptionEntity.GetLongDescription()) if err != nil { logger.Errorf(ctx, "Failed to marshal LongDescription with error: %v", err) return nil, err } } - if descriptionEntity.LongDescription != nil { - longDescriptionBytes, err = proto.Marshal(descriptionEntity.LongDescription) + if descriptionEntity.GetLongDescription() != nil { + longDescriptionBytes, err = proto.Marshal(descriptionEntity.GetLongDescription()) if err != nil { logger.Errorf(ctx, "Failed to marshal LongDescription with error: %v", err) return nil, err } } - if descriptionEntity.SourceCode != nil { - sourceCode = models.SourceCode{Link: descriptionEntity.SourceCode.Link} + if descriptionEntity.GetSourceCode() != nil { + sourceCode = models.SourceCode{Link: descriptionEntity.GetSourceCode().GetLink()} } return &models.DescriptionEntity{ DescriptionEntityKey: models.DescriptionEntityKey{ - ResourceType: id.ResourceType, - Project: id.Project, - Domain: id.Domain, - Name: id.Name, - Version: id.Version, + ResourceType: id.GetResourceType(), + Project: id.GetProject(), + Domain: id.GetDomain(), + Name: id.GetName(), + Version: id.GetVersion(), }, - ShortDescription: descriptionEntity.ShortDescription, + ShortDescription: descriptionEntity.GetShortDescription(), LongDescription: longDescriptionBytes, SourceCode: sourceCode, }, nil diff --git a/flyteadmin/pkg/repositories/transformers/description_entity_test.go b/flyteadmin/pkg/repositories/transformers/description_entity_test.go index 9279ff0f65..b8feeb91b6 100644 --- 
a/flyteadmin/pkg/repositories/transformers/description_entity_test.go +++ b/flyteadmin/pkg/repositories/transformers/description_entity_test.go @@ -37,7 +37,7 @@ func TestToDescriptionEntityExecutionModel(t *testing.T) { assert.Nil(t, err) assert.Equal(t, shortDescription, model.ShortDescription) assert.Equal(t, longDescriptionBytes, model.LongDescription) - assert.Equal(t, sourceCode.Link, model.Link) + assert.Equal(t, sourceCode.GetLink(), model.Link) } func TestFromDescriptionEntityExecutionModel(t *testing.T) { @@ -59,9 +59,9 @@ func TestFromDescriptionEntityExecutionModel(t *testing.T) { SourceCode: models.SourceCode{Link: "https://github/flyte"}, }) assert.Nil(t, err) - assert.Equal(t, descriptionEntity.ShortDescription, shortDescription) - assert.Equal(t, descriptionEntity.LongDescription.IconLink, longDescription.IconLink) - assert.Equal(t, descriptionEntity.SourceCode, sourceCode) + assert.Equal(t, descriptionEntity.GetShortDescription(), shortDescription) + assert.Equal(t, descriptionEntity.GetLongDescription().GetIconLink(), longDescription.GetIconLink()) + assert.Equal(t, descriptionEntity.GetSourceCode(), sourceCode) } func TestFromDescriptionEntityExecutionModels(t *testing.T) { @@ -85,7 +85,7 @@ func TestFromDescriptionEntityExecutionModels(t *testing.T) { }, }) assert.Nil(t, err) - assert.Equal(t, descriptionEntity[0].ShortDescription, shortDescription) - assert.Equal(t, descriptionEntity[0].LongDescription.IconLink, longDescription.IconLink) - assert.Equal(t, descriptionEntity[0].SourceCode, sourceCode) + assert.Equal(t, descriptionEntity[0].GetShortDescription(), shortDescription) + assert.Equal(t, descriptionEntity[0].GetLongDescription().GetIconLink(), longDescription.GetIconLink()) + assert.Equal(t, descriptionEntity[0].GetSourceCode(), sourceCode) } diff --git a/flyteadmin/pkg/repositories/transformers/execution.go b/flyteadmin/pkg/repositories/transformers/execution.go index 711f6bdddb..8943d2303b 100644 --- a/flyteadmin/pkg/repositories/transformers/execution.go +++ b/flyteadmin/pkg/repositories/transformers/execution.go @@ -61,7 +61,7 @@ var ListExecutionTransformerOptions = &ExecutionTransformerOptions{ // CreateExecutionModel transforms a ExecutionCreateRequest to a Execution model func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, error) { requestSpec := input.RequestSpec - if requestSpec.Metadata == nil { + if requestSpec.GetMetadata() == nil { requestSpec.Metadata = &admin.ExecutionMetadata{} } requestSpec.Metadata.SystemMetadata = &admin.SystemMetadata{ @@ -81,7 +81,7 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e WorkflowId: input.WorkflowIdentifier, StateChangeDetails: &admin.ExecutionStateChangeDetails{ State: admin.ExecutionState_EXECUTION_ACTIVE, - Principal: requestSpec.Metadata.Principal, + Principal: requestSpec.GetMetadata().GetPrincipal(), OccurredAt: createdAt, }, } @@ -114,12 +114,12 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e executionModel := &models.Execution{ ExecutionKey: models.ExecutionKey{ - Project: input.WorkflowExecutionID.Project, - Domain: input.WorkflowExecutionID.Domain, - Name: input.WorkflowExecutionID.Name, + Project: input.WorkflowExecutionID.GetProject(), + Domain: input.WorkflowExecutionID.GetDomain(), + Name: input.WorkflowExecutionID.GetName(), }, Spec: spec, - Phase: closure.Phase.String(), + Phase: closure.GetPhase().String(), Closure: closureBytes, WorkflowID: input.WorkflowID, ExecutionCreatedAt: &input.CreatedAt, @@ 
-129,7 +129,7 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e Cluster: input.Cluster, InputsURI: input.InputsURI, UserInputsURI: input.UserInputsURI, - User: requestSpec.Metadata.Principal, + User: requestSpec.GetMetadata().GetPrincipal(), State: &activeExecution, LaunchEntity: strings.ToLower(input.LaunchEntity.String()), } @@ -140,8 +140,8 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e } else { executionModel.LaunchPlanID = input.LaunchPlanID } - if input.RequestSpec.Metadata != nil { - executionModel.Mode = int32(input.RequestSpec.Metadata.Mode) + if input.RequestSpec.GetMetadata() != nil { + executionModel.Mode = int32(input.RequestSpec.GetMetadata().GetMode()) } return executionModel, nil @@ -151,13 +151,13 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e func CreateExecutionTagModel(input CreateExecutionModelInput) ([]*models.ExecutionTag, error) { tags := make([]*models.ExecutionTag, 0) - if input.RequestSpec.Labels != nil { - for k, v := range input.RequestSpec.Labels.Values { + if input.RequestSpec.GetLabels() != nil { + for k, v := range input.RequestSpec.GetLabels().GetValues() { tags = append(tags, &models.ExecutionTag{ ExecutionKey: models.ExecutionKey{ - Project: input.WorkflowExecutionID.Project, - Domain: input.WorkflowExecutionID.Domain, - Name: input.WorkflowExecutionID.Name, + Project: input.WorkflowExecutionID.GetProject(), + Domain: input.WorkflowExecutionID.GetDomain(), + Name: input.WorkflowExecutionID.GetName(), }, Key: k, Value: v, @@ -165,12 +165,12 @@ func CreateExecutionTagModel(input CreateExecutionModelInput) ([]*models.Executi } } - for _, v := range input.RequestSpec.Tags { + for _, v := range input.RequestSpec.GetTags() { tags = append(tags, &models.ExecutionTag{ ExecutionKey: models.ExecutionKey{ - Project: input.WorkflowExecutionID.Project, - Domain: input.WorkflowExecutionID.Domain, - Name: input.WorkflowExecutionID.Name, + Project: input.WorkflowExecutionID.GetProject(), + Domain: input.WorkflowExecutionID.GetDomain(), + Name: input.WorkflowExecutionID.GetName(), }, Key: v, Value: "", @@ -189,10 +189,10 @@ func reassignCluster(ctx context.Context, cluster string, executionID *core.Work if err != nil { return flyteErrs.NewFlyteAdminErrorf(codes.Internal, "Failed to unmarshal execution spec: %v", err) } - if executionSpec.Metadata == nil { + if executionSpec.GetMetadata() == nil { executionSpec.Metadata = &admin.ExecutionMetadata{} } - if executionSpec.Metadata.SystemMetadata == nil { + if executionSpec.GetMetadata().GetSystemMetadata() == nil { executionSpec.Metadata.SystemMetadata = &admin.SystemMetadata{} } executionSpec.Metadata.SystemMetadata.ExecutionCluster = cluster @@ -214,64 +214,64 @@ func UpdateExecutionModelState( if err != nil { return flyteErrs.NewFlyteAdminErrorf(codes.Internal, "Failed to unmarshal execution closure: %v", err) } - executionClosure.Phase = request.Event.Phase - executionClosure.UpdatedAt = request.Event.OccurredAt - execution.Phase = request.Event.Phase.String() + executionClosure.Phase = request.GetEvent().GetPhase() + executionClosure.UpdatedAt = request.GetEvent().GetOccurredAt() + execution.Phase = request.GetEvent().GetPhase().String() - occurredAtTimestamp, err := ptypes.Timestamp(request.Event.OccurredAt) + occurredAtTimestamp, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt()) if err != nil { return flyteErrs.NewFlyteAdminErrorf(codes.Internal, "Failed to parse OccurredAt: %v", err) } 
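
The theme running through these flyteadmin hunks is replacing direct struct field access on protobuf messages with the generated GetX() accessors, which tolerate nil intermediate messages. Below is a minimal standalone sketch of why the chained form cannot panic; the types are hand-written stand-ins, not the actual flyteidl-generated code.

    // Illustrative stand-ins only; protoc-gen-go emits getters of this shape
    // for every message field, returning the zero value on a nil receiver.
    package main

    import "fmt"

    type WorkflowExecutionIdentifier struct {
        Project string
    }

    func (m *WorkflowExecutionIdentifier) GetProject() string {
        if m != nil {
            return m.Project
        }
        return ""
    }

    type NodeExecutionIdentifier struct {
        ExecutionId *WorkflowExecutionIdentifier
    }

    func (m *NodeExecutionIdentifier) GetExecutionId() *WorkflowExecutionIdentifier {
        if m != nil {
            return m.ExecutionId
        }
        return nil
    }

    func main() {
        var id *NodeExecutionIdentifier // nil message, e.g. an unset field on a request
        // id.ExecutionId.Project would panic with a nil pointer dereference;
        // the getter chain degrades to zero values instead.
        fmt.Printf("project=%q\n", id.GetExecutionId().GetProject()) // prints project=""
    }

With direct field access, a nil ExecutionId would panic before the handler could return a proper admin error; the getter chain yields zero values that downstream validation can reject cleanly.
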
execution.ExecutionUpdatedAt = &occurredAtTimestamp // only mark the execution started when we get the initial running event - if request.Event.Phase == core.WorkflowExecution_RUNNING { + if request.GetEvent().GetPhase() == core.WorkflowExecution_RUNNING { execution.StartedAt = &occurredAtTimestamp - executionClosure.StartedAt = request.Event.OccurredAt - } else if common.IsExecutionTerminal(request.Event.Phase) { + executionClosure.StartedAt = request.GetEvent().GetOccurredAt() + } else if common.IsExecutionTerminal(request.GetEvent().GetPhase()) { if execution.StartedAt != nil { execution.Duration = occurredAtTimestamp.Sub(*execution.StartedAt) executionClosure.Duration = ptypes.DurationProto(execution.Duration) } else { logger.Infof(context.Background(), - "Cannot compute duration because startedAt was never set, requestId: %v", request.RequestId) + "Cannot compute duration because startedAt was never set, requestId: %v", request.GetRequestId()) } } // Default or empty cluster values do not require updating the execution model. - ignoreClusterFromEvent := len(request.Event.ProducerId) == 0 || request.Event.ProducerId == common.DefaultProducerID - logger.Debugf(ctx, "Producer Id [%v]. IgnoreClusterFromEvent [%v]", request.Event.ProducerId, ignoreClusterFromEvent) + ignoreClusterFromEvent := len(request.GetEvent().GetProducerId()) == 0 || request.GetEvent().GetProducerId() == common.DefaultProducerID + logger.Debugf(ctx, "Producer Id [%v]. IgnoreClusterFromEvent [%v]", request.GetEvent().GetProducerId(), ignoreClusterFromEvent) if !ignoreClusterFromEvent { if clusterReassignablePhases.Has(execution.Phase) { - if err := reassignCluster(ctx, request.Event.ProducerId, request.Event.ExecutionId, execution); err != nil { + if err := reassignCluster(ctx, request.GetEvent().GetProducerId(), request.GetEvent().GetExecutionId(), execution); err != nil { return err } - } else if execution.Cluster != request.Event.ProducerId { + } else if execution.Cluster != request.GetEvent().GetProducerId() { errorMsg := fmt.Sprintf("Cannot accept events for running/terminated execution [%v] from cluster [%s],"+ "expected events to originate from [%s]", - request.Event.ExecutionId, request.Event.ProducerId, execution.Cluster) + request.GetEvent().GetExecutionId(), request.GetEvent().GetProducerId(), execution.Cluster) return flyteErrs.NewIncompatibleClusterError(ctx, errorMsg, execution.Cluster) } } - if request.Event.GetOutputUri() != "" { + if request.GetEvent().GetOutputUri() != "" { executionClosure.OutputResult = &admin.ExecutionClosure_Outputs{ Outputs: &admin.LiteralMapBlob{ Data: &admin.LiteralMapBlob_Uri{ - Uri: request.Event.GetOutputUri(), + Uri: request.GetEvent().GetOutputUri(), }, }, } - } else if request.Event.GetOutputData() != nil { + } else if request.GetEvent().GetOutputData() != nil { switch inlineEventDataPolicy { case interfaces.InlineEventDataPolicyStoreInline: executionClosure.OutputResult = &admin.ExecutionClosure_OutputData{ - OutputData: request.Event.GetOutputData(), + OutputData: request.GetEvent().GetOutputData(), } default: logger.Debugf(ctx, "Offloading outputs per InlineEventDataPolicy") - uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetOutputData(), - request.Event.ExecutionId.Project, request.Event.ExecutionId.Domain, request.Event.ExecutionId.Name, OutputsObjectSuffix) + uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetOutputData(), + request.GetEvent().GetExecutionId().GetProject(), 
request.GetEvent().GetExecutionId().GetDomain(), request.GetEvent().GetExecutionId().GetName(), OutputsObjectSuffix) if err != nil { return err } @@ -283,11 +283,11 @@ func UpdateExecutionModelState( }, } } - } else if request.Event.GetError() != nil { + } else if request.GetEvent().GetError() != nil { executionClosure.OutputResult = &admin.ExecutionClosure_Error{ - Error: request.Event.GetError(), + Error: request.GetEvent().GetError(), } - k := request.Event.GetError().Kind.String() + k := request.GetEvent().GetError().GetKind().String() execution.ErrorKind = &k execution.ErrorCode = &request.Event.GetError().Code } @@ -372,13 +372,13 @@ func FromExecutionModel(ctx context.Context, executionModel models.Execution, op return nil, flyteErrs.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal spec") } if len(opts.DefaultNamespace) > 0 { - if spec.Metadata == nil { + if spec.GetMetadata() == nil { spec.Metadata = &admin.ExecutionMetadata{} } - if spec.Metadata.SystemMetadata == nil { + if spec.GetMetadata().GetSystemMetadata() == nil { spec.Metadata.SystemMetadata = &admin.SystemMetadata{} } - if len(spec.GetMetadata().GetSystemMetadata().Namespace) == 0 { + if len(spec.GetMetadata().GetSystemMetadata().GetNamespace()) == 0 { logger.Infof(ctx, "setting execution system metadata namespace to [%s]", opts.DefaultNamespace) spec.Metadata.SystemMetadata.Namespace = opts.DefaultNamespace } @@ -388,7 +388,7 @@ func FromExecutionModel(ctx context.Context, executionModel models.Execution, op if err = proto.Unmarshal(executionModel.Closure, &closure); err != nil { return nil, flyteErrs.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure") } - if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 { + if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().GetMessage()) > 0 { trimmedErrOutputResult := closure.GetError() trimmedErrMessage := TrimErrorMessage(trimmedErrOutputResult.GetMessage()) trimmedErrOutputResult.Message = trimmedErrMessage @@ -397,7 +397,7 @@ func FromExecutionModel(ctx context.Context, executionModel models.Execution, op } } - if closure.StateChangeDetails == nil { + if closure.GetStateChangeDetails() == nil { // Update execution state details from model for older executions if closure.StateChangeDetails, err = PopulateDefaultStateChangeDetails(executionModel); err != nil { return nil, err diff --git a/flyteadmin/pkg/repositories/transformers/execution_event.go b/flyteadmin/pkg/repositories/transformers/execution_event.go index 34f3c4d84a..f603af44e6 100644 --- a/flyteadmin/pkg/repositories/transformers/execution_event.go +++ b/flyteadmin/pkg/repositories/transformers/execution_event.go @@ -11,18 +11,18 @@ import ( // Transforms a ExecutionEventCreateRequest to a ExecutionEvent model func CreateExecutionEventModel(request *admin.WorkflowExecutionEventRequest) (*models.ExecutionEvent, error) { - occurredAt, err := ptypes.Timestamp(request.Event.OccurredAt) + occurredAt, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to marshal occurred at timestamp") } return &models.ExecutionEvent{ ExecutionKey: models.ExecutionKey{ - Project: request.Event.ExecutionId.Project, - Domain: request.Event.ExecutionId.Domain, - Name: request.Event.ExecutionId.Name, + Project: request.GetEvent().GetExecutionId().GetProject(), + Domain: request.GetEvent().GetExecutionId().GetDomain(), + Name: 
request.GetEvent().GetExecutionId().GetName(), }, - RequestID: request.RequestId, + RequestID: request.GetRequestId(), OccurredAt: occurredAt, - Phase: request.Event.Phase.String(), + Phase: request.GetEvent().GetPhase().String(), }, nil } diff --git a/flyteadmin/pkg/repositories/transformers/execution_test.go b/flyteadmin/pkg/repositories/transformers/execution_test.go index c7b9f33e95..5ea50cefe4 100644 --- a/flyteadmin/pkg/repositories/transformers/execution_test.go +++ b/flyteadmin/pkg/repositories/transformers/execution_test.go @@ -78,7 +78,7 @@ func TestCreateExecutionModel(t *testing.T) { Domain: "domain", Name: "name", }, - RequestSpec: execRequest.Spec, + RequestSpec: execRequest.GetSpec(), LaunchPlanID: lpID, WorkflowID: wfID, CreatedAt: createdAt, @@ -103,7 +103,7 @@ func TestCreateExecutionModel(t *testing.T) { assert.Equal(t, sourceID, execution.SourceExecutionID) assert.Equal(t, "launch_plan", execution.LaunchEntity) assert.Equal(t, execution.Phase, core.WorkflowExecution_UNDEFINED.String()) - expectedSpec := execRequest.Spec + expectedSpec := execRequest.GetSpec() expectedSpec.Metadata.Principal = principal expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{ ExecutionCluster: cluster, @@ -136,7 +136,7 @@ func TestCreateExecutionModel(t *testing.T) { Domain: "domain", Name: "name", }, - RequestSpec: execRequest.Spec, + RequestSpec: execRequest.GetSpec(), LaunchPlanID: lpID, WorkflowID: wfID, CreatedAt: createdAt, @@ -162,7 +162,7 @@ func TestCreateExecutionModel(t *testing.T) { assert.Equal(t, sourceID, execution.SourceExecutionID) assert.Equal(t, "launch_plan", execution.LaunchEntity) assert.Equal(t, core.WorkflowExecution_FAILED.String(), execution.Phase) - expectedSpec := execRequest.Spec + expectedSpec := execRequest.GetSpec() expectedSpec.Metadata.Principal = principal expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{ ExecutionCluster: cluster, @@ -202,7 +202,7 @@ func TestCreateExecutionModel(t *testing.T) { Domain: "domain", Name: "name", }, - RequestSpec: execRequest.Spec, + RequestSpec: execRequest.GetSpec(), LaunchPlanID: lpID, WorkflowID: wfID, CreatedAt: createdAt, @@ -228,7 +228,7 @@ func TestCreateExecutionModel(t *testing.T) { assert.Equal(t, sourceID, execution.SourceExecutionID) assert.Equal(t, "launch_plan", execution.LaunchEntity) assert.Equal(t, core.WorkflowExecution_FAILED.String(), execution.Phase) - expectedSpec := execRequest.Spec + expectedSpec := execRequest.GetSpec() expectedSpec.Metadata.Principal = principal expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{ ExecutionCluster: cluster, @@ -268,7 +268,7 @@ func TestCreateExecutionModel(t *testing.T) { Domain: "domain", Name: "name", }, - RequestSpec: execRequest.Spec, + RequestSpec: execRequest.GetSpec(), LaunchPlanID: lpID, WorkflowID: wfID, CreatedAt: createdAt, @@ -294,7 +294,7 @@ func TestCreateExecutionModel(t *testing.T) { assert.Equal(t, sourceID, execution.SourceExecutionID) assert.Equal(t, "launch_plan", execution.LaunchEntity) assert.Equal(t, core.WorkflowExecution_FAILED.String(), execution.Phase) - expectedSpec := execRequest.Spec + expectedSpec := execRequest.GetSpec() expectedSpec.Metadata.Principal = principal expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{ ExecutionCluster: cluster, @@ -341,7 +341,7 @@ func TestUpdateModelState_UnknownToRunning(t *testing.T) { Phase: core.WorkflowExecution_UNDEFINED, CreatedAt: createdAtProto, } - spec := testutils.GetExecutionRequest().Spec + spec := testutils.GetExecutionRequest().GetSpec() specBytes, 
_ := proto.Marshal(spec) existingClosureBytes, _ := proto.Marshal(&existingClosure) startedAt := time.Now() @@ -401,7 +401,7 @@ func TestUpdateModelState_RunningToFailed(t *testing.T) { } ec := "foo" ek := core.ExecutionError_SYSTEM - spec := testutils.GetExecutionRequest().Spec + spec := testutils.GetExecutionRequest().GetSpec() specBytes, _ := proto.Marshal(spec) existingClosureBytes, _ := proto.Marshal(&existingClosure) executionModel := getRunningExecutionModel(specBytes, existingClosureBytes, startedAt) @@ -474,7 +474,7 @@ func TestUpdateModelState_RunningToSuccess(t *testing.T) { Phase: core.WorkflowExecution_RUNNING, StartedAt: startedAtProto, } - spec := testutils.GetExecutionRequest().Spec + spec := testutils.GetExecutionRequest().GetSpec() specBytes, _ := proto.Marshal(spec) existingClosureBytes, _ := proto.Marshal(&existingClosure) executionModel := getRunningExecutionModel(specBytes, existingClosureBytes, startedAt) @@ -692,7 +692,7 @@ func TestGetExecutionIdentifier(t *testing.T) { } func TestFromExecutionModel(t *testing.T) { - spec := testutils.GetExecutionRequest().Spec + spec := testutils.GetExecutionRequest().GetSpec() specBytes, _ := proto.Marshal(spec) phase := core.WorkflowExecution_RUNNING.String() startedAt := time.Date(2018, 8, 30, 0, 0, 0, 0, time.UTC) @@ -700,7 +700,7 @@ func TestFromExecutionModel(t *testing.T) { startedAtProto, _ := ptypes.TimestampProto(startedAt) createdAtProto, _ := ptypes.TimestampProto(createdAt) closure := admin.ExecutionClosure{ - ComputedInputs: spec.Inputs, + ComputedInputs: spec.GetInputs(), Phase: core.WorkflowExecution_RUNNING, StartedAt: startedAtProto, StateChangeDetails: &admin.ExecutionStateChangeDetails{ @@ -758,15 +758,15 @@ func TestFromExecutionModel_Aborted(t *testing.T) { } execution, err := FromExecutionModel(context.TODO(), executionModel, DefaultExecutionTransformerOptions) assert.Nil(t, err) - assert.Equal(t, core.WorkflowExecution_ABORTED, execution.Closure.Phase) + assert.Equal(t, core.WorkflowExecution_ABORTED, execution.GetClosure().GetPhase()) assert.True(t, proto.Equal(&admin.AbortMetadata{ Cause: abortCause, - }, execution.Closure.GetAbortMetadata())) + }, execution.GetClosure().GetAbortMetadata())) executionModel.Phase = core.WorkflowExecution_RUNNING.String() execution, err = FromExecutionModel(context.TODO(), executionModel, DefaultExecutionTransformerOptions) assert.Nil(t, err) - assert.Empty(t, execution.Closure.GetAbortCause()) + assert.Empty(t, execution.GetClosure().GetAbortCause()) } func TestFromExecutionModel_Error(t *testing.T) { @@ -795,8 +795,8 @@ func TestFromExecutionModel_Error(t *testing.T) { expectedExecErr := execErr expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen)) assert.Nil(t, err) - assert.Equal(t, core.WorkflowExecution_FAILED, execution.Closure.Phase) - assert.True(t, proto.Equal(expectedExecErr, execution.Closure.GetError())) + assert.Equal(t, core.WorkflowExecution_FAILED, execution.GetClosure().GetPhase()) + assert.True(t, proto.Equal(expectedExecErr, execution.GetClosure().GetError())) } func TestFromExecutionModel_ValidUTF8TrimmedErrorMsg(t *testing.T) { @@ -843,11 +843,11 @@ func TestFromExecutionModel_OverwriteNamespace(t *testing.T) { DefaultNamespace: overwrittenNamespace, }) assert.NoError(t, err) - assert.Equal(t, execution.GetSpec().GetMetadata().GetSystemMetadata().Namespace, overwrittenNamespace) + assert.Equal(t, execution.GetSpec().GetMetadata().GetSystemMetadata().GetNamespace(), overwrittenNamespace) } func TestFromExecutionModels(t *testing.T) { - 
spec := testutils.GetExecutionRequest().Spec + spec := testutils.GetExecutionRequest().GetSpec() specBytes, _ := proto.Marshal(spec) phase := core.WorkflowExecution_SUCCEEDED.String() startedAt := time.Date(2018, 8, 30, 0, 0, 0, 0, time.UTC) @@ -857,7 +857,7 @@ func TestFromExecutionModels(t *testing.T) { duration := 2 * time.Minute durationProto := ptypes.DurationProto(duration) closure := admin.ExecutionClosure{ - ComputedInputs: spec.Inputs, + ComputedInputs: spec.GetInputs(), Phase: core.WorkflowExecution_RUNNING, StartedAt: startedAtProto, Duration: durationProto, @@ -914,7 +914,7 @@ func TestUpdateModelState_WithClusterInformation(t *testing.T) { Phase: core.WorkflowExecution_UNDEFINED, CreatedAt: createdAtProto, } - spec := testutils.GetExecutionRequest().Spec + spec := testutils.GetExecutionRequest().GetSpec() specBytes, _ := proto.Marshal(spec) existingClosureBytes, _ := proto.Marshal(&existingClosure) startedAt := time.Now() @@ -982,7 +982,7 @@ func TestReassignCluster(t *testing.T) { } t.Run("happy case", func(t *testing.T) { - spec := testutils.GetExecutionRequest().Spec + spec := testutils.GetExecutionRequest().GetSpec() spec.Metadata = &admin.ExecutionMetadata{ SystemMetadata: &admin.SystemMetadata{ ExecutionCluster: oldCluster, @@ -1000,10 +1000,10 @@ func TestReassignCluster(t *testing.T) { var updatedSpec admin.ExecutionSpec err = proto.Unmarshal(executionModel.Spec, &updatedSpec) assert.NoError(t, err) - assert.Equal(t, newCluster, updatedSpec.Metadata.SystemMetadata.ExecutionCluster) + assert.Equal(t, newCluster, updatedSpec.GetMetadata().GetSystemMetadata().GetExecutionCluster()) }) t.Run("happy case - initialize cluster", func(t *testing.T) { - spec := testutils.GetExecutionRequest().Spec + spec := testutils.GetExecutionRequest().GetSpec() specBytes, _ := proto.Marshal(spec) executionModel := models.Execution{ Spec: specBytes, @@ -1015,7 +1015,7 @@ func TestReassignCluster(t *testing.T) { var updatedSpec admin.ExecutionSpec err = proto.Unmarshal(executionModel.Spec, &updatedSpec) assert.NoError(t, err) - assert.Equal(t, newCluster, updatedSpec.Metadata.SystemMetadata.ExecutionCluster) + assert.Equal(t, newCluster, updatedSpec.GetMetadata().GetSystemMetadata().GetExecutionCluster()) }) t.Run("invalid existing spec", func(t *testing.T) { executionModel := models.Execution{ @@ -1040,9 +1040,9 @@ func TestGetExecutionStateFromModel(t *testing.T) { executionStatus, err := PopulateDefaultStateChangeDetails(executionModel) assert.Nil(t, err) assert.NotNil(t, executionStatus) - assert.Equal(t, admin.ExecutionState_EXECUTION_ACTIVE, executionStatus.State) - assert.NotNil(t, executionStatus.OccurredAt) - assert.Equal(t, createdAtProto, executionStatus.OccurredAt) + assert.Equal(t, admin.ExecutionState_EXECUTION_ACTIVE, executionStatus.GetState()) + assert.NotNil(t, executionStatus.GetOccurredAt()) + assert.Equal(t, createdAtProto, executionStatus.GetOccurredAt()) }) t.Run("incorrect created at", func(t *testing.T) { createdAt := time.Unix(math.MinInt64, math.MinInt32).UTC() @@ -1072,10 +1072,10 @@ func TestUpdateExecutionModelStateChangeDetails(t *testing.T) { err = proto.Unmarshal(execModel.Closure, closure) assert.Nil(t, err) assert.NotNil(t, closure) - assert.NotNil(t, closure.StateChangeDetails) - assert.Equal(t, admin.ExecutionState_EXECUTION_ARCHIVED, closure.StateChangeDetails.State) - assert.Equal(t, "dummyUser", closure.StateChangeDetails.Principal) - assert.Equal(t, statetUpdateAtProto, closure.StateChangeDetails.OccurredAt) + assert.NotNil(t, 
closure.GetStateChangeDetails()) + assert.Equal(t, admin.ExecutionState_EXECUTION_ARCHIVED, closure.GetStateChangeDetails().GetState()) + assert.Equal(t, "dummyUser", closure.GetStateChangeDetails().GetPrincipal()) + assert.Equal(t, statetUpdateAtProto, closure.GetStateChangeDetails().GetOccurredAt()) }) t.Run("bad closure", func(t *testing.T) { diff --git a/flyteadmin/pkg/repositories/transformers/launch_plan.go b/flyteadmin/pkg/repositories/transformers/launch_plan.go index a7b33736d1..acfa14282a 100644 --- a/flyteadmin/pkg/repositories/transformers/launch_plan.go +++ b/flyteadmin/pkg/repositories/transformers/launch_plan.go @@ -16,10 +16,10 @@ func CreateLaunchPlan( expectedOutputs *core.VariableMap) *admin.LaunchPlan { return &admin.LaunchPlan{ - Id: request.Id, - Spec: request.Spec, + Id: request.GetId(), + Spec: request.GetSpec(), Closure: &admin.LaunchPlanClosure{ - ExpectedInputs: request.Spec.DefaultInputs, + ExpectedInputs: request.GetSpec().GetDefaultInputs(), ExpectedOutputs: expectedOutputs, }, } @@ -31,22 +31,22 @@ func CreateLaunchPlanModel( workflowRepoID uint, digest []byte, initState admin.LaunchPlanState) (models.LaunchPlan, error) { - spec, err := proto.Marshal(launchPlan.Spec) + spec, err := proto.Marshal(launchPlan.GetSpec()) if err != nil { return models.LaunchPlan{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize launch plan spec") } - closure, err := proto.Marshal(launchPlan.Closure) + closure, err := proto.Marshal(launchPlan.GetClosure()) if err != nil { return models.LaunchPlan{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize launch plan closure") } var launchConditionType models.LaunchConditionType scheduleType := models.LaunchPlanScheduleTypeNONE - if launchPlan.Spec.EntityMetadata != nil && launchPlan.Spec.EntityMetadata.Schedule != nil { - if launchPlan.Spec.EntityMetadata.Schedule.GetCronExpression() != "" || launchPlan.Spec.EntityMetadata.Schedule.GetCronSchedule() != nil { + if launchPlan.GetSpec().GetEntityMetadata() != nil && launchPlan.GetSpec().GetEntityMetadata().GetSchedule() != nil { + if launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetCronExpression() != "" || launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetCronSchedule() != nil { scheduleType = models.LaunchPlanScheduleTypeCRON launchConditionType = models.LaunchConditionTypeSCHED - } else if launchPlan.Spec.EntityMetadata.Schedule.GetRate() != nil { + } else if launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetRate() != nil { scheduleType = models.LaunchPlanScheduleTypeRATE launchConditionType = models.LaunchConditionTypeSCHED } @@ -56,10 +56,10 @@ func CreateLaunchPlanModel( lpModel := models.LaunchPlan{ LaunchPlanKey: models.LaunchPlanKey{ - Project: launchPlan.Id.Project, - Domain: launchPlan.Id.Domain, - Name: launchPlan.Id.Name, - Version: launchPlan.Id.Version, + Project: launchPlan.GetId().GetProject(), + Domain: launchPlan.GetId().GetDomain(), + Name: launchPlan.GetId().GetName(), + Version: launchPlan.GetId().GetVersion(), }, Spec: spec, State: &state, diff --git a/flyteadmin/pkg/repositories/transformers/launch_plan_test.go b/flyteadmin/pkg/repositories/transformers/launch_plan_test.go index 65846de42b..c4551f89e5 100644 --- a/flyteadmin/pkg/repositories/transformers/launch_plan_test.go +++ b/flyteadmin/pkg/repositories/transformers/launch_plan_test.go @@ -39,8 +39,8 @@ func TestCreateLaunchPlan(t *testing.T) { launchPlan := CreateLaunchPlan(request, expectedOutputs) assert.True(t, proto.Equal( &admin.LaunchPlan{ - Id: 
request.Id, - Spec: request.Spec, + Id: request.GetId(), + Spec: request.GetSpec(), Closure: &admin.LaunchPlanClosure{ ExpectedInputs: expectedInputs, ExpectedOutputs: expectedOutputs, @@ -54,8 +54,8 @@ func TestToLaunchPlanModel(t *testing.T) { launchPlanDigest := []byte("launch plan") launchPlan := &admin.LaunchPlan{ - Id: lpRequest.Id, - Spec: lpRequest.Spec, + Id: lpRequest.GetId(), + Spec: lpRequest.GetSpec(), Closure: &admin.LaunchPlanClosure{ ExpectedInputs: expectedInputs, ExpectedOutputs: expectedOutputs, @@ -70,11 +70,11 @@ func TestToLaunchPlanModel(t *testing.T) { assert.Equal(t, "version", launchPlanModel.Version) assert.Equal(t, workflowID, launchPlanModel.WorkflowID) - expectedSpec, _ := proto.Marshal(lpRequest.Spec) + expectedSpec, _ := proto.Marshal(lpRequest.GetSpec()) assert.Equal(t, expectedSpec, launchPlanModel.Spec) assert.Equal(t, models.LaunchPlanScheduleTypeNONE, launchPlanModel.ScheduleType) - expectedClosure := launchPlan.Closure + expectedClosure := launchPlan.GetClosure() var actualClosure admin.LaunchPlanClosure err = proto.Unmarshal(launchPlanModel.Closure, &actualClosure) @@ -101,8 +101,8 @@ func testLaunchPlanWithCronInternal(t *testing.T, lpRequest *admin.LaunchPlanCre launchPlanDigest := []byte("launch plan") launchPlan := &admin.LaunchPlan{ - Id: lpRequest.Id, - Spec: lpRequest.Spec, + Id: lpRequest.GetId(), + Spec: lpRequest.GetSpec(), Closure: &admin.LaunchPlanClosure{ ExpectedInputs: expectedInputs, ExpectedOutputs: expectedOutputs, @@ -117,11 +117,11 @@ func testLaunchPlanWithCronInternal(t *testing.T, lpRequest *admin.LaunchPlanCre assert.Equal(t, "version", launchPlanModel.Version) assert.Equal(t, workflowID, launchPlanModel.WorkflowID) - expectedSpec, _ := proto.Marshal(lpRequest.Spec) + expectedSpec, _ := proto.Marshal(lpRequest.GetSpec()) assert.Equal(t, expectedSpec, launchPlanModel.Spec) assert.Equal(t, models.LaunchPlanScheduleTypeCRON, launchPlanModel.ScheduleType) - expectedClosure := launchPlan.Closure + expectedClosure := launchPlan.GetClosure() var actualClosure admin.LaunchPlanClosure err = proto.Unmarshal(launchPlanModel.Closure, &actualClosure) @@ -137,8 +137,8 @@ func TestToLaunchPlanModelWithFixedRateSchedule(t *testing.T) { launchPlanDigest := []byte("launch plan") launchPlan := &admin.LaunchPlan{ - Id: lpRequest.Id, - Spec: lpRequest.Spec, + Id: lpRequest.GetId(), + Spec: lpRequest.GetSpec(), Closure: &admin.LaunchPlanClosure{ ExpectedInputs: expectedInputs, ExpectedOutputs: expectedOutputs, @@ -153,11 +153,11 @@ func TestToLaunchPlanModelWithFixedRateSchedule(t *testing.T) { assert.Equal(t, "version", launchPlanModel.Version) assert.Equal(t, workflowID, launchPlanModel.WorkflowID) - expectedSpec, _ := proto.Marshal(lpRequest.Spec) + expectedSpec, _ := proto.Marshal(lpRequest.GetSpec()) assert.Equal(t, expectedSpec, launchPlanModel.Spec) assert.Equal(t, models.LaunchPlanScheduleTypeRATE, launchPlanModel.ScheduleType) - expectedClosure := launchPlan.Closure + expectedClosure := launchPlan.GetClosure() var actualClosure admin.LaunchPlanClosure err = proto.Unmarshal(launchPlanModel.Closure, &actualClosure) @@ -174,13 +174,13 @@ func TestFromLaunchPlanModel(t *testing.T) { updatedAt := createdAt.Add(time.Minute) updatedAtProto, _ := ptypes.TimestampProto(updatedAt) closure := admin.LaunchPlanClosure{ - ExpectedInputs: lpRequest.Spec.DefaultInputs, - ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs, + ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(), + ExpectedOutputs: 
workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(), CreatedAt: createdAtProto, UpdatedAt: updatedAtProto, State: admin.LaunchPlanState_ACTIVE, } - specBytes, _ := proto.Marshal(lpRequest.Spec) + specBytes, _ := proto.Marshal(lpRequest.GetSpec()) closureBytes, _ := proto.Marshal(&closure) model := models.LaunchPlan{ @@ -206,9 +206,9 @@ func TestFromLaunchPlanModel(t *testing.T) { Domain: "domain", Name: "name", Version: "version", - }, lp.Id)) - assert.True(t, proto.Equal(&closure, lp.Closure)) - assert.True(t, proto.Equal(lpRequest.Spec, lp.Spec)) + }, lp.GetId())) + assert.True(t, proto.Equal(&closure, lp.GetClosure())) + assert.True(t, proto.Equal(lpRequest.GetSpec(), lp.GetSpec())) } func TestFromLaunchPlanModels(t *testing.T) { @@ -220,13 +220,13 @@ func TestFromLaunchPlanModels(t *testing.T) { updatedAt := createdAt.Add(time.Minute) updatedAtProto, _ := ptypes.TimestampProto(updatedAt) closure := admin.LaunchPlanClosure{ - ExpectedInputs: lpRequest.Spec.DefaultInputs, - ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs, + ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(), + ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(), CreatedAt: createdAtProto, UpdatedAt: updatedAtProto, State: admin.LaunchPlanState_ACTIVE, } - specBytes, _ := proto.Marshal(lpRequest.Spec) + specBytes, _ := proto.Marshal(lpRequest.GetSpec()) closureBytes, _ := proto.Marshal(&closure) m1 := models.LaunchPlan{ @@ -272,7 +272,7 @@ func TestFromLaunchPlanModels(t *testing.T) { Domain: "staging", Name: "othername", Version: "versionsecond", - }, lp[1].Id)) - assert.True(t, proto.Equal(&closure, lp[1].Closure)) - assert.True(t, proto.Equal(lpRequest.Spec, lp[1].Spec)) + }, lp[1].GetId())) + assert.True(t, proto.Equal(&closure, lp[1].GetClosure())) + assert.True(t, proto.Equal(lpRequest.GetSpec(), lp[1].GetSpec())) } diff --git a/flyteadmin/pkg/repositories/transformers/named_entity.go b/flyteadmin/pkg/repositories/transformers/named_entity.go index 14c5818786..646d673923 100644 --- a/flyteadmin/pkg/repositories/transformers/named_entity.go +++ b/flyteadmin/pkg/repositories/transformers/named_entity.go @@ -6,16 +6,16 @@ import ( ) func CreateNamedEntityModel(request *admin.NamedEntityUpdateRequest) models.NamedEntity { - stateInt := int32(request.Metadata.State) + stateInt := int32(request.GetMetadata().GetState()) return models.NamedEntity{ NamedEntityKey: models.NamedEntityKey{ - ResourceType: request.ResourceType, - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, + ResourceType: request.GetResourceType(), + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), }, NamedEntityMetadataFields: models.NamedEntityMetadataFields{ - Description: request.Metadata.Description, + Description: request.GetMetadata().GetDescription(), State: &stateInt, }, } diff --git a/flyteadmin/pkg/repositories/transformers/node_execution.go b/flyteadmin/pkg/repositories/transformers/node_execution.go index 817f53290a..107e9efb70 100644 --- a/flyteadmin/pkg/repositories/transformers/node_execution.go +++ b/flyteadmin/pkg/repositories/transformers/node_execution.go @@ -30,7 +30,7 @@ type ToNodeExecutionModelInput struct { func addNodeRunningState(request *admin.NodeExecutionEventRequest, nodeExecutionModel *models.NodeExecution, closure *admin.NodeExecutionClosure) error { - occurredAt, err := ptypes.Timestamp(request.Event.OccurredAt) + occurredAt, err := 
ptypes.Timestamp(request.GetEvent().GetOccurredAt()) if err != nil { return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal occurredAt with error: %v", err) } @@ -49,10 +49,10 @@ func addTerminalState( ctx context.Context, request *admin.NodeExecutionEventRequest, nodeExecutionModel *models.NodeExecution, closure *admin.NodeExecutionClosure, inlineEventDataPolicy interfaces.InlineEventDataPolicy, storageClient *storage.DataStore) error { - if closure.StartedAt == nil { + if closure.GetStartedAt() == nil { logger.Warning(context.Background(), "node execution is missing StartedAt") } else { - endTime, err := ptypes.Timestamp(request.Event.OccurredAt) + endTime, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt()) if err != nil { return errors.NewFlyteAdminErrorf( codes.Internal, "Failed to parse node execution occurred at timestamp: %v", err) @@ -62,21 +62,21 @@ func addTerminalState( } // Serialize output results (if they exist) - if request.Event.GetOutputUri() != "" { + if request.GetEvent().GetOutputUri() != "" { closure.OutputResult = &admin.NodeExecutionClosure_OutputUri{ - OutputUri: request.Event.GetOutputUri(), + OutputUri: request.GetEvent().GetOutputUri(), } - } else if request.Event.GetOutputData() != nil { + } else if request.GetEvent().GetOutputData() != nil { switch inlineEventDataPolicy { case interfaces.InlineEventDataPolicyStoreInline: closure.OutputResult = &admin.NodeExecutionClosure_OutputData{ - OutputData: request.Event.GetOutputData(), + OutputData: request.GetEvent().GetOutputData(), } default: logger.Debugf(ctx, "Offloading outputs per InlineEventDataPolicy") - uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetOutputData(), - request.Event.Id.ExecutionId.Project, request.Event.Id.ExecutionId.Domain, request.Event.Id.ExecutionId.Name, - request.Event.Id.NodeId, OutputsObjectSuffix) + uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetOutputData(), + request.GetEvent().GetId().GetExecutionId().GetProject(), request.GetEvent().GetId().GetExecutionId().GetDomain(), request.GetEvent().GetId().GetExecutionId().GetName(), + request.GetEvent().GetId().GetNodeId(), OutputsObjectSuffix) if err != nil { return err } @@ -84,15 +84,15 @@ func addTerminalState( OutputUri: uri.String(), } } - } else if request.Event.GetError() != nil { + } else if request.GetEvent().GetError() != nil { closure.OutputResult = &admin.NodeExecutionClosure_Error{ - Error: request.Event.GetError(), + Error: request.GetEvent().GetError(), } - k := request.Event.GetError().Kind.String() + k := request.GetEvent().GetError().GetKind().String() nodeExecutionModel.ErrorKind = &k nodeExecutionModel.ErrorCode = &request.Event.GetError().Code } - closure.DeckUri = request.Event.DeckUri + closure.DeckUri = request.GetEvent().GetDeckUri() return nil } @@ -100,47 +100,47 @@ func addTerminalState( func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInput) (*models.NodeExecution, error) { nodeExecution := &models.NodeExecution{ NodeExecutionKey: models.NodeExecutionKey{ - NodeID: input.Request.Event.Id.NodeId, + NodeID: input.Request.GetEvent().GetId().GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: input.Request.Event.Id.ExecutionId.Project, - Domain: input.Request.Event.Id.ExecutionId.Domain, - Name: input.Request.Event.Id.ExecutionId.Name, + Project: input.Request.GetEvent().GetId().GetExecutionId().GetProject(), + Domain: input.Request.GetEvent().GetId().GetExecutionId().GetDomain(), + Name: 
input.Request.GetEvent().GetId().GetExecutionId().GetName(), }, }, - Phase: input.Request.Event.Phase.String(), + Phase: input.Request.GetEvent().GetPhase().String(), } - reportedAt := input.Request.Event.ReportedAt - if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) { - reportedAt = input.Request.Event.OccurredAt + reportedAt := input.Request.GetEvent().GetReportedAt() + if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) { + reportedAt = input.Request.GetEvent().GetOccurredAt() } closure := admin.NodeExecutionClosure{ - Phase: input.Request.Event.Phase, - CreatedAt: input.Request.Event.OccurredAt, + Phase: input.Request.GetEvent().GetPhase(), + CreatedAt: input.Request.GetEvent().GetOccurredAt(), UpdatedAt: reportedAt, } nodeExecutionMetadata := admin.NodeExecutionMetaData{ - RetryGroup: input.Request.Event.RetryGroup, - SpecNodeId: input.Request.Event.SpecNodeId, - IsParentNode: input.Request.Event.IsParent, - IsDynamic: input.Request.Event.IsDynamic, - IsArray: input.Request.Event.IsArray, + RetryGroup: input.Request.GetEvent().GetRetryGroup(), + SpecNodeId: input.Request.GetEvent().GetSpecNodeId(), + IsParentNode: input.Request.GetEvent().GetIsParent(), + IsDynamic: input.Request.GetEvent().GetIsDynamic(), + IsArray: input.Request.GetEvent().GetIsArray(), } err := handleNodeExecutionInputs(ctx, nodeExecution, input.Request, input.StorageClient) if err != nil { return nil, err } - if input.Request.Event.Phase == core.NodeExecution_RUNNING { + if input.Request.GetEvent().GetPhase() == core.NodeExecution_RUNNING { err := addNodeRunningState(input.Request, nodeExecution, &closure) if err != nil { return nil, err } } - if common.IsNodeExecutionTerminal(input.Request.Event.Phase) { + if common.IsNodeExecutionTerminal(input.Request.GetEvent().GetPhase()) { err := addTerminalState(ctx, input.Request, nodeExecution, &closure, input.InlineEventDataPolicy, input.StorageClient) if err != nil { return nil, err @@ -148,16 +148,16 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp } // Update TaskNodeMetadata, which includes caching information today. 
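
The addNodeRunningState and addTerminalState changes above keep the existing pattern of converting the event's OccurredAt protobuf timestamp into a time.Time via the legacy ptypes helpers and surfacing conversion failures as admin errors. A small hedged sketch of that conversion follows; it assumes only the github.com/golang/protobuf/ptypes package already imported by these files, and its error handling mirrors the shape of the surrounding code, not its exact messages.

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        occurredAtProto := ptypes.TimestampNow() // stands in for event.GetOccurredAt()

        // ptypes.Timestamp converts a protobuf Timestamp into a time.Time and
        // reports an error for out-of-range or nil values.
        occurredAt, err := ptypes.Timestamp(occurredAtProto)
        if err != nil {
            // The transformers wrap this in an admin error (codes.Internal).
            fmt.Println("failed to parse OccurredAt:", err)
            return
        }
        fmt.Println("node execution occurred at:", occurredAt.Format(time.RFC3339))
    }
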
- if input.Request.Event.GetTaskNodeMetadata() != nil { + if input.Request.GetEvent().GetTaskNodeMetadata() != nil { targetMetadata := &admin.NodeExecutionClosure_TaskNodeMetadata{ TaskNodeMetadata: &admin.TaskNodeMetadata{ - CheckpointUri: input.Request.Event.GetTaskNodeMetadata().CheckpointUri, + CheckpointUri: input.Request.GetEvent().GetTaskNodeMetadata().GetCheckpointUri(), }, } - if input.Request.Event.GetTaskNodeMetadata().CatalogKey != nil { - st := input.Request.Event.GetTaskNodeMetadata().GetCacheStatus().String() - targetMetadata.TaskNodeMetadata.CacheStatus = input.Request.Event.GetTaskNodeMetadata().GetCacheStatus() - targetMetadata.TaskNodeMetadata.CatalogKey = input.Request.Event.GetTaskNodeMetadata().GetCatalogKey() + if input.Request.GetEvent().GetTaskNodeMetadata().GetCatalogKey() != nil { + st := input.Request.GetEvent().GetTaskNodeMetadata().GetCacheStatus().String() + targetMetadata.TaskNodeMetadata.CacheStatus = input.Request.GetEvent().GetTaskNodeMetadata().GetCacheStatus() + targetMetadata.TaskNodeMetadata.CatalogKey = input.Request.GetEvent().GetTaskNodeMetadata().GetCatalogKey() nodeExecution.CacheStatus = &st } closure.TargetMetadata = targetMetadata @@ -175,7 +175,7 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp } nodeExecution.Closure = marshaledClosure nodeExecution.NodeExecutionMetadata = marshaledNodeExecutionMetadata - nodeExecutionCreatedAt, err := ptypes.Timestamp(input.Request.Event.OccurredAt) + nodeExecutionCreatedAt, err := ptypes.Timestamp(input.Request.GetEvent().GetOccurredAt()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event timestamp") } @@ -185,14 +185,14 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event reported_at timestamp") } nodeExecution.NodeExecutionUpdatedAt = &nodeExecutionUpdatedAt - if input.Request.Event.ParentTaskMetadata != nil { + if input.Request.GetEvent().GetParentTaskMetadata() != nil { nodeExecution.ParentTaskExecutionID = input.ParentTaskExecutionID } nodeExecution.ParentID = input.ParentID nodeExecution.DynamicWorkflowRemoteClosureReference = input.DynamicWorkflowRemoteClosure internalData := &genModel.NodeExecutionInternalData{ - EventVersion: input.Request.Event.EventVersion, + EventVersion: input.Request.GetEvent().GetEventVersion(), } internalDataBytes, err := proto.Marshal(internalData) if err != nil { @@ -216,21 +216,21 @@ func UpdateNodeExecutionModel( return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal node execution closure with error: %+v", err) } - nodeExecutionModel.Phase = request.Event.Phase.String() - nodeExecutionClosure.Phase = request.Event.Phase - reportedAt := request.Event.ReportedAt - if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) { - reportedAt = request.Event.OccurredAt + nodeExecutionModel.Phase = request.GetEvent().GetPhase().String() + nodeExecutionClosure.Phase = request.GetEvent().GetPhase() + reportedAt := request.GetEvent().GetReportedAt() + if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) { + reportedAt = request.GetEvent().GetOccurredAt() } nodeExecutionClosure.UpdatedAt = reportedAt - if request.Event.Phase == core.NodeExecution_RUNNING { + if request.GetEvent().GetPhase() == core.NodeExecution_RUNNING { err := addNodeRunningState(request, nodeExecutionModel, &nodeExecutionClosure) if err != nil { return err } } - if 
common.IsNodeExecutionTerminal(request.Event.Phase) { + if common.IsNodeExecutionTerminal(request.GetEvent().GetPhase()) { err := addTerminalState(ctx, request, nodeExecutionModel, &nodeExecutionClosure, inlineEventDataPolicy, storageClient) if err != nil { return err @@ -247,24 +247,24 @@ func UpdateNodeExecutionModel( } // Update TaskNodeMetadata, which includes caching information today. - if request.Event.GetTaskNodeMetadata() != nil { + if request.GetEvent().GetTaskNodeMetadata() != nil { targetMetadata := &admin.NodeExecutionClosure_TaskNodeMetadata{ TaskNodeMetadata: &admin.TaskNodeMetadata{ - CheckpointUri: request.Event.GetTaskNodeMetadata().CheckpointUri, + CheckpointUri: request.GetEvent().GetTaskNodeMetadata().GetCheckpointUri(), }, } - if request.Event.GetTaskNodeMetadata().CatalogKey != nil { - st := request.Event.GetTaskNodeMetadata().GetCacheStatus().String() - targetMetadata.TaskNodeMetadata.CacheStatus = request.Event.GetTaskNodeMetadata().GetCacheStatus() - targetMetadata.TaskNodeMetadata.CatalogKey = request.Event.GetTaskNodeMetadata().GetCatalogKey() + if request.GetEvent().GetTaskNodeMetadata().GetCatalogKey() != nil { + st := request.GetEvent().GetTaskNodeMetadata().GetCacheStatus().String() + targetMetadata.TaskNodeMetadata.CacheStatus = request.GetEvent().GetTaskNodeMetadata().GetCacheStatus() + targetMetadata.TaskNodeMetadata.CatalogKey = request.GetEvent().GetTaskNodeMetadata().GetCatalogKey() nodeExecutionModel.CacheStatus = &st } nodeExecutionClosure.TargetMetadata = targetMetadata // if this is a dynamic task then maintain the DynamicJobSpecUri - dynamicWorkflowMetadata := request.Event.GetTaskNodeMetadata().DynamicWorkflow - if dynamicWorkflowMetadata != nil && len(dynamicWorkflowMetadata.DynamicJobSpecUri) > 0 { - nodeExecutionClosure.DynamicJobSpecUri = dynamicWorkflowMetadata.DynamicJobSpecUri + dynamicWorkflowMetadata := request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow() + if dynamicWorkflowMetadata != nil && len(dynamicWorkflowMetadata.GetDynamicJobSpecUri()) > 0 { + nodeExecutionClosure.DynamicJobSpecUri = dynamicWorkflowMetadata.GetDynamicJobSpecUri() } } @@ -284,7 +284,7 @@ func UpdateNodeExecutionModel( // In the case of dynamic nodes reporting DYNAMIC_RUNNING, the IsParent and IsDynamic bits will be set for this event. // Update the node execution metadata accordingly. - if request.Event.IsParent || request.Event.IsDynamic || request.Event.IsArray { + if request.GetEvent().GetIsParent() || request.GetEvent().GetIsDynamic() || request.GetEvent().GetIsArray() { var nodeExecutionMetadata admin.NodeExecutionMetaData if len(nodeExecutionModel.NodeExecutionMetadata) > 0 { if err := proto.Unmarshal(nodeExecutionModel.NodeExecutionMetadata, &nodeExecutionMetadata); err != nil { @@ -294,13 +294,13 @@ func UpdateNodeExecutionModel( } // Not every event sends IsParent and IsDynamic as an artifact of how propeller handles dynamic nodes. // Only explicitly set the fields, when they're set in the event itself. 
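
UpdateNodeExecutionModel, like the execution transformer earlier in this diff, stores closures and metadata on the GORM models as serialized proto blobs: unmarshal the existing bytes, flip the relevant flags, and marshal the message back. A condensed sketch of that round trip is shown here, with a well-known wrapper type standing in for admin.NodeExecutionMetaData.

    // Assumed simplification: wrappers.BoolValue plays the role of the admin
    // metadata message; the update flow mirrors the pattern in the hunk above.
    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "github.com/golang/protobuf/ptypes/wrappers"
    )

    func updateMetadataBlob(existing []byte) ([]byte, error) {
        var metadata wrappers.BoolValue
        // Only unmarshal when the model already carries metadata bytes.
        if len(existing) > 0 {
            if err := proto.Unmarshal(existing, &metadata); err != nil {
                return nil, err
            }
        }
        metadata.Value = true // e.g. marking IsParentNode once the event reports it
        return proto.Marshal(&metadata)
    }

    func main() {
        blob, err := updateMetadataBlob(nil)
        if err != nil {
            panic(err)
        }
        fmt.Printf("updated metadata blob: %d bytes\n", len(blob))
    }
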
- if request.Event.IsParent { + if request.GetEvent().GetIsParent() { nodeExecutionMetadata.IsParentNode = true } - if request.Event.IsDynamic { + if request.GetEvent().GetIsDynamic() { nodeExecutionMetadata.IsDynamic = true } - if request.Event.IsArray { + if request.GetEvent().GetIsArray() { nodeExecutionMetadata.IsArray = true } nodeExecMetadataBytes, err := proto.Marshal(&nodeExecutionMetadata) @@ -321,7 +321,7 @@ func FromNodeExecutionModel(nodeExecutionModel models.NodeExecution, opts *Execu return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure") } - if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 { + if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().GetMessage()) > 0 { trimmedErrOutputResult := closure.GetError() trimmedErrMessage := TrimErrorMessage(trimmedErrOutputResult.GetMessage()) trimmedErrOutputResult.Message = trimmedErrMessage @@ -337,7 +337,7 @@ func FromNodeExecutionModel(nodeExecutionModel models.NodeExecution, opts *Execu } // TODO: delete this block and references to preloading child node executions no earlier than Q3 2022 // This is required for historical reasons because propeller did not always send IsParent or IsDynamic in events. - if !(nodeExecutionMetadata.IsParentNode || nodeExecutionMetadata.IsDynamic) { + if !(nodeExecutionMetadata.GetIsParentNode() || nodeExecutionMetadata.GetIsDynamic()) { if len(nodeExecutionModel.ChildNodeExecutions) > 0 { nodeExecutionMetadata.IsParentNode = true if len(nodeExecutionModel.DynamicWorkflowRemoteClosureReference) > 0 { @@ -380,14 +380,14 @@ func handleNodeExecutionInputs(ctx context.Context, // Inputs are static over the duration of the node execution, no need to update them when they're already set return nil } - switch request.Event.GetInputValue().(type) { + switch request.GetEvent().GetInputValue().(type) { case *event.NodeExecutionEvent_InputUri: - logger.Debugf(ctx, "saving node execution input URI [%s]", request.Event.GetInputUri()) - nodeExecutionModel.InputURI = request.Event.GetInputUri() + logger.Debugf(ctx, "saving node execution input URI [%s]", request.GetEvent().GetInputUri()) + nodeExecutionModel.InputURI = request.GetEvent().GetInputUri() case *event.NodeExecutionEvent_InputData: - uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetInputData(), - request.Event.Id.ExecutionId.Project, request.Event.Id.ExecutionId.Domain, request.Event.Id.ExecutionId.Name, - request.Event.Id.NodeId, InputsObjectSuffix) + uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetInputData(), + request.GetEvent().GetId().GetExecutionId().GetProject(), request.GetEvent().GetId().GetExecutionId().GetDomain(), request.GetEvent().GetId().GetExecutionId().GetName(), + request.GetEvent().GetId().GetNodeId(), InputsObjectSuffix) if err != nil { return err } diff --git a/flyteadmin/pkg/repositories/transformers/node_execution_event.go b/flyteadmin/pkg/repositories/transformers/node_execution_event.go index c817054499..88c4d8be0e 100644 --- a/flyteadmin/pkg/repositories/transformers/node_execution_event.go +++ b/flyteadmin/pkg/repositories/transformers/node_execution_event.go @@ -11,21 +11,21 @@ import ( // Transforms a NodeExecutionEventRequest to a NodeExecutionEvent model func CreateNodeExecutionEventModel(request *admin.NodeExecutionEventRequest) (*models.NodeExecutionEvent, error) { - occurredAt, err := ptypes.Timestamp(request.Event.OccurredAt) + 
occurredAt, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to marshal occurred at timestamp") } return &models.NodeExecutionEvent{ NodeExecutionKey: models.NodeExecutionKey{ - NodeID: request.Event.Id.NodeId, + NodeID: request.GetEvent().GetId().GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: request.Event.Id.ExecutionId.Project, - Domain: request.Event.Id.ExecutionId.Domain, - Name: request.Event.Id.ExecutionId.Name, + Project: request.GetEvent().GetId().GetExecutionId().GetProject(), + Domain: request.GetEvent().GetId().GetExecutionId().GetDomain(), + Name: request.GetEvent().GetId().GetExecutionId().GetName(), }, }, - RequestID: request.RequestId, + RequestID: request.GetRequestId(), OccurredAt: occurredAt, - Phase: request.Event.Phase.String(), + Phase: request.GetEvent().GetPhase().String(), }, nil } diff --git a/flyteadmin/pkg/repositories/transformers/node_execution_test.go b/flyteadmin/pkg/repositories/transformers/node_execution_test.go index a52c8e76a3..e37d312612 100644 --- a/flyteadmin/pkg/repositories/transformers/node_execution_test.go +++ b/flyteadmin/pkg/repositories/transformers/node_execution_test.go @@ -72,7 +72,7 @@ func TestAddRunningState(t *testing.T) { err := addNodeRunningState(&request, &nodeExecutionModel, &closure) assert.Nil(t, err) assert.Equal(t, startedAt, *nodeExecutionModel.StartedAt) - assert.True(t, proto.Equal(startedAtProto, closure.StartedAt)) + assert.True(t, proto.Equal(startedAtProto, closure.GetStartedAt())) } func TestAddTerminalState_OutputURI(t *testing.T) { @@ -251,9 +251,9 @@ func TestCreateNodeExecutionModel(t *testing.T) { UpdatedAt: occurredAtProto, TargetMetadata: &admin.NodeExecutionClosure_TaskNodeMetadata{ TaskNodeMetadata: &admin.TaskNodeMetadata{ - CacheStatus: request.Event.GetTaskNodeMetadata().CacheStatus, - CatalogKey: request.Event.GetTaskNodeMetadata().CatalogKey, - CheckpointUri: request.Event.GetTaskNodeMetadata().CheckpointUri, + CacheStatus: request.GetEvent().GetTaskNodeMetadata().GetCacheStatus(), + CatalogKey: request.GetEvent().GetTaskNodeMetadata().GetCatalogKey(), + CheckpointUri: request.GetEvent().GetTaskNodeMetadata().GetCheckpointUri(), }, }, } @@ -266,7 +266,7 @@ func TestCreateNodeExecutionModel(t *testing.T) { EventVersion: 2, } internalDataBytes, _ := proto.Marshal(internalData) - cacheStatus := request.Event.GetTaskNodeMetadata().CacheStatus.String() + cacheStatus := request.GetEvent().GetTaskNodeMetadata().GetCacheStatus().String() assert.Equal(t, &models.NodeExecution{ NodeExecutionKey: models.NodeExecutionKey{ NodeID: "node id", @@ -383,7 +383,7 @@ func TestUpdateNodeExecutionModel(t *testing.T) { assert.Equal(t, occurredAt, *nodeExecutionModel.StartedAt) assert.EqualValues(t, occurredAt, *nodeExecutionModel.NodeExecutionUpdatedAt) assert.NotNil(t, nodeExecutionModel.CacheStatus) - assert.Equal(t, *nodeExecutionModel.CacheStatus, request.Event.GetTaskNodeMetadata().CacheStatus.String()) + assert.Equal(t, *nodeExecutionModel.CacheStatus, request.GetEvent().GetTaskNodeMetadata().GetCacheStatus().String()) assert.Equal(t, nodeExecutionModel.DynamicWorkflowRemoteClosureReference, dynamicWorkflowClosureRef) var closure = &admin.NodeExecutionClosure{ @@ -392,12 +392,12 @@ func TestUpdateNodeExecutionModel(t *testing.T) { UpdatedAt: occurredAtProto, TargetMetadata: &admin.NodeExecutionClosure_TaskNodeMetadata{ TaskNodeMetadata: &admin.TaskNodeMetadata{ - CacheStatus: 
request.Event.GetTaskNodeMetadata().CacheStatus, - CatalogKey: request.Event.GetTaskNodeMetadata().CatalogKey, - CheckpointUri: request.Event.GetTaskNodeMetadata().CheckpointUri, + CacheStatus: request.GetEvent().GetTaskNodeMetadata().GetCacheStatus(), + CatalogKey: request.GetEvent().GetTaskNodeMetadata().GetCatalogKey(), + CheckpointUri: request.GetEvent().GetTaskNodeMetadata().GetCheckpointUri(), }, }, - DynamicJobSpecUri: request.Event.GetTaskNodeMetadata().DynamicWorkflow.DynamicJobSpecUri, + DynamicJobSpecUri: request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow().GetDynamicJobSpecUri(), } var closureBytes, _ = proto.Marshal(closure) assert.Equal(t, nodeExecutionModel.Closure, closureBytes) @@ -553,7 +553,7 @@ func TestFromNodeExecutionModel_Error(t *testing.T) { expectedExecErr := execErr expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen)) assert.Nil(t, err) - assert.True(t, proto.Equal(expectedExecErr, nodeExecution.Closure.GetError())) + assert.True(t, proto.Equal(expectedExecErr, nodeExecution.GetClosure().GetError())) } func TestFromNodeExecutionModelWithChildren(t *testing.T) { diff --git a/flyteadmin/pkg/repositories/transformers/project.go b/flyteadmin/pkg/repositories/transformers/project.go index e1e6e90f84..b9690cecc5 100644 --- a/flyteadmin/pkg/repositories/transformers/project.go +++ b/flyteadmin/pkg/repositories/transformers/project.go @@ -14,12 +14,12 @@ type CreateProjectModelInput struct { } func CreateProjectModel(project *admin.Project) models.Project { - stateInt := int32(project.State) - if project.Labels == nil { + stateInt := int32(project.GetState()) + if project.GetLabels() == nil { return models.Project{ - Identifier: project.Id, - Name: project.Name, - Description: project.Description, + Identifier: project.GetId(), + Name: project.GetName(), + Description: project.GetDescription(), State: &stateInt, } } @@ -28,9 +28,9 @@ func CreateProjectModel(project *admin.Project) models.Project { return models.Project{} } return models.Project{ - Identifier: project.Id, - Name: project.Name, - Description: project.Description, + Identifier: project.GetId(), + Name: project.GetName(), + Description: project.GetDescription(), Labels: projectBytes, State: &stateInt, } @@ -46,7 +46,7 @@ func FromProjectModel(projectModel models.Project, domains []*admin.Domain) *adm Id: projectModel.Identifier, Name: projectModel.Name, Description: projectModel.Description, - Labels: projectDeserialized.Labels, + Labels: projectDeserialized.GetLabels(), State: admin.Project_ProjectState(*projectModel.State), } project.Domains = domains diff --git a/flyteadmin/pkg/repositories/transformers/project_test.go b/flyteadmin/pkg/repositories/transformers/project_test.go index 914194f1dc..d9bedb2038 100644 --- a/flyteadmin/pkg/repositories/transformers/project_test.go +++ b/flyteadmin/pkg/repositories/transformers/project_test.go @@ -95,10 +95,10 @@ func TestFromProjectModels(t *testing.T) { projects := FromProjectModels(projectModels, domains) assert.Len(t, projects, 2) for index, project := range projects { - assert.Equal(t, fmt.Sprintf("proj%v_id", index+1), project.Id) - assert.Equal(t, fmt.Sprintf("proj%v_name", index+1), project.Name) - assert.Equal(t, fmt.Sprintf("proj%v_description", index+1), project.Description) - assert.Equal(t, admin.Project_ACTIVE, project.State) - assert.EqualValues(t, domains, project.Domains) + assert.Equal(t, fmt.Sprintf("proj%v_id", index+1), project.GetId()) + assert.Equal(t, fmt.Sprintf("proj%v_name", index+1), project.GetName()) + 
assert.Equal(t, fmt.Sprintf("proj%v_description", index+1), project.GetDescription()) + assert.Equal(t, admin.Project_ACTIVE, project.GetState()) + assert.EqualValues(t, domains, project.GetDomains()) } } diff --git a/flyteadmin/pkg/repositories/transformers/resource.go b/flyteadmin/pkg/repositories/transformers/resource.go index 36b5ddbd6a..4b4a226c92 100644 --- a/flyteadmin/pkg/repositories/transformers/resource.go +++ b/flyteadmin/pkg/repositories/transformers/resource.go @@ -14,14 +14,14 @@ import ( ) func WorkflowAttributesToResourceModel(attributes *admin.WorkflowAttributes, resource admin.MatchableResource) (models.Resource, error) { - attributeBytes, err := proto.Marshal(attributes.MatchingAttributes) + attributeBytes, err := proto.Marshal(attributes.GetMatchingAttributes()) if err != nil { return models.Resource{}, err } return models.Resource{ - Project: attributes.Project, - Domain: attributes.Domain, - Workflow: attributes.Workflow, + Project: attributes.GetProject(), + Domain: attributes.GetDomain(), + Workflow: attributes.GetWorkflow(), ResourceType: resource.String(), Priority: models.ResourcePriorityWorkflowLevel, Attributes: attributeBytes, @@ -31,15 +31,15 @@ func WorkflowAttributesToResourceModel(attributes *admin.WorkflowAttributes, res func mergeUpdatePluginOverrides(existingAttributes *admin.MatchingAttributes, newMatchingAttributes *admin.MatchingAttributes) *admin.MatchingAttributes { taskPluginOverrides := make(map[string]*admin.PluginOverride) - if existingAttributes.GetPluginOverrides() != nil && len(existingAttributes.GetPluginOverrides().Overrides) > 0 { - for _, pluginOverride := range existingAttributes.GetPluginOverrides().Overrides { - taskPluginOverrides[pluginOverride.TaskType] = pluginOverride + if existingAttributes.GetPluginOverrides() != nil && len(existingAttributes.GetPluginOverrides().GetOverrides()) > 0 { + for _, pluginOverride := range existingAttributes.GetPluginOverrides().GetOverrides() { + taskPluginOverrides[pluginOverride.GetTaskType()] = pluginOverride } } if newMatchingAttributes.GetPluginOverrides() != nil && - len(newMatchingAttributes.GetPluginOverrides().Overrides) > 0 { - for _, pluginOverride := range newMatchingAttributes.GetPluginOverrides().Overrides { - taskPluginOverrides[pluginOverride.TaskType] = pluginOverride + len(newMatchingAttributes.GetPluginOverrides().GetOverrides()) > 0 { + for _, pluginOverride := range newMatchingAttributes.GetPluginOverrides().GetOverrides() { + taskPluginOverrides[pluginOverride.GetTaskType()] = pluginOverride } } @@ -99,13 +99,13 @@ func FromResourceModelToWorkflowAttributes(model models.Resource) (admin.Workflo } func ProjectDomainAttributesToResourceModel(attributes *admin.ProjectDomainAttributes, resource admin.MatchableResource) (models.Resource, error) { - attributeBytes, err := proto.Marshal(attributes.MatchingAttributes) + attributeBytes, err := proto.Marshal(attributes.GetMatchingAttributes()) if err != nil { return models.Resource{}, err } return models.Resource{ - Project: attributes.Project, - Domain: attributes.Domain, + Project: attributes.GetProject(), + Domain: attributes.GetDomain(), ResourceType: resource.String(), Priority: models.ResourcePriorityProjectDomainLevel, Attributes: attributeBytes, @@ -113,12 +113,12 @@ func ProjectDomainAttributesToResourceModel(attributes *admin.ProjectDomainAttri } func ProjectAttributesToResourceModel(attributes *admin.ProjectAttributes, resource admin.MatchableResource) (models.Resource, error) { - attributeBytes, err := 
proto.Marshal(attributes.MatchingAttributes) + attributeBytes, err := proto.Marshal(attributes.GetMatchingAttributes()) if err != nil { return models.Resource{}, err } return models.Resource{ - Project: attributes.Project, + Project: attributes.GetProject(), ResourceType: resource.String(), Priority: models.ResourcePriorityProjectLevel, Attributes: attributeBytes, diff --git a/flyteadmin/pkg/repositories/transformers/resource_test.go b/flyteadmin/pkg/repositories/transformers/resource_test.go index 6efcc89fc0..a1ef2cacef 100644 --- a/flyteadmin/pkg/repositories/transformers/resource_test.go +++ b/flyteadmin/pkg/repositories/transformers/resource_test.go @@ -97,15 +97,15 @@ func TestMergeUpdateProjectDomainAttributes(t *testing.T) { assert.NoError(t, err) var sawPythonTask, sawSidecarTask, sawHiveTask bool for _, override := range updatedAttributes.GetPluginOverrides().GetOverrides() { - if override.TaskType == "python" { + if override.GetTaskType() == "python" { sawPythonTask = true - assert.EqualValues(t, []string{"plugin_a"}, override.PluginId) - } else if override.TaskType == "sidecar" { + assert.EqualValues(t, []string{"plugin_a"}, override.GetPluginId()) + } else if override.GetTaskType() == "sidecar" { sawSidecarTask = true - assert.EqualValues(t, []string{"plugin_c"}, override.PluginId) - } else if override.TaskType == "hive" { + assert.EqualValues(t, []string{"plugin_c"}, override.GetPluginId()) + } else if override.GetTaskType() == "hive" { sawHiveTask = true - assert.EqualValues(t, []string{"plugin_d"}, override.PluginId) + assert.EqualValues(t, []string{"plugin_d"}, override.GetPluginId()) } } assert.True(t, sawPythonTask, "Missing python task from finalized attributes") @@ -194,15 +194,15 @@ func TestMergeUpdateWorkflowAttributes(t *testing.T) { assert.NoError(t, err) var sawPythonTask, sawSidecarTask, sawHiveTask bool for _, override := range updatedAttributes.GetPluginOverrides().GetOverrides() { - if override.TaskType == "python" { + if override.GetTaskType() == "python" { sawPythonTask = true - assert.EqualValues(t, []string{"plugin_a"}, override.PluginId) - } else if override.TaskType == "sidecar" { + assert.EqualValues(t, []string{"plugin_a"}, override.GetPluginId()) + } else if override.GetTaskType() == "sidecar" { sawSidecarTask = true - assert.EqualValues(t, []string{"plugin_c"}, override.PluginId) - } else if override.TaskType == "hive" { + assert.EqualValues(t, []string{"plugin_c"}, override.GetPluginId()) + } else if override.GetTaskType() == "hive" { sawHiveTask = true - assert.EqualValues(t, []string{"plugin_d"}, override.PluginId) + assert.EqualValues(t, []string{"plugin_d"}, override.GetPluginId()) } } assert.True(t, sawPythonTask, "Missing python task from finalized attributes") diff --git a/flyteadmin/pkg/repositories/transformers/signal.go b/flyteadmin/pkg/repositories/transformers/signal.go index bbef0a00eb..5cb1b37ef4 100644 --- a/flyteadmin/pkg/repositories/transformers/signal.go +++ b/flyteadmin/pkg/repositories/transformers/signal.go @@ -14,21 +14,21 @@ func CreateSignalModel(signalID *core.SignalIdentifier, signalType *core.Literal signalModel := models.Signal{} if signalID != nil { signalKey := &signalModel.SignalKey - if signalID.ExecutionId != nil { + if signalID.GetExecutionId() != nil { executionKey := &signalKey.ExecutionKey - if len(signalID.ExecutionId.Project) > 0 { - executionKey.Project = signalID.ExecutionId.Project + if len(signalID.GetExecutionId().GetProject()) > 0 { + executionKey.Project = signalID.GetExecutionId().GetProject() } - if 
len(signalID.ExecutionId.Domain) > 0 { - executionKey.Domain = signalID.ExecutionId.Domain + if len(signalID.GetExecutionId().GetDomain()) > 0 { + executionKey.Domain = signalID.GetExecutionId().GetDomain() } - if len(signalID.ExecutionId.Name) > 0 { - executionKey.Name = signalID.ExecutionId.Name + if len(signalID.GetExecutionId().GetName()) > 0 { + executionKey.Name = signalID.GetExecutionId().GetName() } } - if len(signalID.SignalId) > 0 { - signalKey.SignalID = signalID.SignalId + if len(signalID.GetSignalId()) > 0 { + signalKey.SignalID = signalID.GetSignalId() } } diff --git a/flyteadmin/pkg/repositories/transformers/signal_test.go b/flyteadmin/pkg/repositories/transformers/signal_test.go index a54d5f1437..82637e06f3 100644 --- a/flyteadmin/pkg/repositories/transformers/signal_test.go +++ b/flyteadmin/pkg/repositories/transformers/signal_test.go @@ -82,7 +82,7 @@ func TestCreateSignalModel(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - signalModel, err := CreateSignalModel(test.proto.Id, test.proto.Type, test.proto.Value) + signalModel, err := CreateSignalModel(test.proto.GetId(), test.proto.GetType(), test.proto.GetValue()) assert.NoError(t, err) assert.Equal(t, test.model, signalModel) diff --git a/flyteadmin/pkg/repositories/transformers/task.go b/flyteadmin/pkg/repositories/transformers/task.go index a8baf355e7..6c64ee2ec0 100644 --- a/flyteadmin/pkg/repositories/transformers/task.go +++ b/flyteadmin/pkg/repositories/transformers/task.go @@ -22,15 +22,15 @@ func CreateTaskModel( return models.Task{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize task closure") } var taskType string - if taskClosure.CompiledTask != nil && taskClosure.CompiledTask.Template != nil { - taskType = taskClosure.CompiledTask.Template.Type + if taskClosure.GetCompiledTask() != nil && taskClosure.GetCompiledTask().GetTemplate() != nil { + taskType = taskClosure.GetCompiledTask().GetTemplate().GetType() } return models.Task{ TaskKey: models.TaskKey{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, - Version: request.Id.Version, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), + Version: request.GetId().GetVersion(), }, Closure: closureBytes, Digest: digest, diff --git a/flyteadmin/pkg/repositories/transformers/task_execution.go b/flyteadmin/pkg/repositories/transformers/task_execution.go index 9f24ed2aa4..354291cd64 100644 --- a/flyteadmin/pkg/repositories/transformers/task_execution.go +++ b/flyteadmin/pkg/repositories/transformers/task_execution.go @@ -34,7 +34,7 @@ type CreateTaskExecutionModelInput struct { func addTaskStartedState(request *admin.TaskExecutionEventRequest, taskExecutionModel *models.TaskExecution, closure *admin.TaskExecutionClosure) error { - occurredAt, err := ptypes.Timestamp(request.Event.OccurredAt) + occurredAt, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt()) if err != nil { return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal occurredAt with error: %v", err) } @@ -43,7 +43,7 @@ func addTaskStartedState(request *admin.TaskExecutionEventRequest, taskExecution // This check makes sure any out of order if taskExecutionModel.StartedAt == nil { taskExecutionModel.StartedAt = &occurredAt - closure.StartedAt = request.Event.OccurredAt + closure.StartedAt = request.GetEvent().GetOccurredAt() } return nil } @@ -56,7 +56,7 @@ func addTaskTerminalState( if taskExecutionModel.StartedAt == nil { 
logger.Warning(context.Background(), "task execution is missing StartedAt") } else { - endTime, err := ptypes.Timestamp(request.Event.OccurredAt) + endTime, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt()) if err != nil { return errors.NewFlyteAdminErrorf( codes.Internal, "Failed to parse task execution occurredAt timestamp: %v", err) @@ -70,23 +70,23 @@ func addTaskTerminalState( closure.Duration = ptypes.DurationProto(taskExecutionModel.Duration) } - if request.Event.GetOutputUri() != "" { + if request.GetEvent().GetOutputUri() != "" { closure.OutputResult = &admin.TaskExecutionClosure_OutputUri{ - OutputUri: request.Event.GetOutputUri(), + OutputUri: request.GetEvent().GetOutputUri(), } - } else if request.Event.GetOutputData() != nil { + } else if request.GetEvent().GetOutputData() != nil { switch inlineEventDataPolicy { case interfaces.InlineEventDataPolicyStoreInline: closure.OutputResult = &admin.TaskExecutionClosure_OutputData{ - OutputData: request.Event.GetOutputData(), + OutputData: request.GetEvent().GetOutputData(), } default: logger.Debugf(ctx, "Offloading outputs per InlineEventDataPolicy") - uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetOutputData(), - request.Event.ParentNodeExecutionId.ExecutionId.Project, request.Event.ParentNodeExecutionId.ExecutionId.Domain, - request.Event.ParentNodeExecutionId.ExecutionId.Name, request.Event.ParentNodeExecutionId.NodeId, - request.Event.TaskId.Project, request.Event.TaskId.Domain, request.Event.TaskId.Name, request.Event.TaskId.Version, - strconv.FormatUint(uint64(request.Event.RetryAttempt), 10), OutputsObjectSuffix) + uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetOutputData(), + request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetProject(), request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetDomain(), + request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetName(), request.GetEvent().GetParentNodeExecutionId().GetNodeId(), + request.GetEvent().GetTaskId().GetProject(), request.GetEvent().GetTaskId().GetDomain(), request.GetEvent().GetTaskId().GetName(), request.GetEvent().GetTaskId().GetVersion(), + strconv.FormatUint(uint64(request.GetEvent().GetRetryAttempt()), 10), OutputsObjectSuffix) if err != nil { return err } @@ -94,9 +94,9 @@ func addTaskTerminalState( OutputUri: uri.String(), } } - } else if request.Event.GetError() != nil { + } else if request.GetEvent().GetError() != nil { closure.OutputResult = &admin.TaskExecutionClosure_Error{ - Error: request.Event.GetError(), + Error: request.GetEvent().GetError(), } } return nil @@ -106,35 +106,35 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode taskExecution := &models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: input.Request.Event.TaskId.Project, - Domain: input.Request.Event.TaskId.Domain, - Name: input.Request.Event.TaskId.Name, - Version: input.Request.Event.TaskId.Version, + Project: input.Request.GetEvent().GetTaskId().GetProject(), + Domain: input.Request.GetEvent().GetTaskId().GetDomain(), + Name: input.Request.GetEvent().GetTaskId().GetName(), + Version: input.Request.GetEvent().GetTaskId().GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: input.Request.Event.ParentNodeExecutionId.NodeId, + NodeID: input.Request.GetEvent().GetParentNodeExecutionId().GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: 
input.Request.Event.ParentNodeExecutionId.ExecutionId.Project, - Domain: input.Request.Event.ParentNodeExecutionId.ExecutionId.Domain, - Name: input.Request.Event.ParentNodeExecutionId.ExecutionId.Name, + Project: input.Request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetProject(), + Domain: input.Request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetDomain(), + Name: input.Request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetName(), }, }, RetryAttempt: &input.Request.Event.RetryAttempt, }, - Phase: input.Request.Event.Phase.String(), - PhaseVersion: input.Request.Event.PhaseVersion, + Phase: input.Request.GetEvent().GetPhase().String(), + PhaseVersion: input.Request.GetEvent().GetPhaseVersion(), } err := handleTaskExecutionInputs(ctx, taskExecution, input.Request, input.StorageClient) if err != nil { return nil, err } - metadata := input.Request.Event.Metadata - if metadata != nil && len(metadata.ExternalResources) > 1 { - sort.Slice(metadata.ExternalResources, func(i, j int) bool { - a := metadata.ExternalResources[i] - b := metadata.ExternalResources[j] + metadata := input.Request.GetEvent().GetMetadata() + if metadata != nil && len(metadata.GetExternalResources()) > 1 { + sort.Slice(metadata.GetExternalResources(), func(i, j int) bool { + a := metadata.GetExternalResources()[i] + b := metadata.GetExternalResources()[j] if a.GetIndex() == b.GetIndex() { return a.GetRetryAttempt() < b.GetRetryAttempt() } @@ -142,41 +142,41 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode }) } - reportedAt := input.Request.Event.ReportedAt - if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) { - reportedAt = input.Request.Event.OccurredAt + reportedAt := input.Request.GetEvent().GetReportedAt() + if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) { + reportedAt = input.Request.GetEvent().GetOccurredAt() } closure := &admin.TaskExecutionClosure{ - Phase: input.Request.Event.Phase, + Phase: input.Request.GetEvent().GetPhase(), UpdatedAt: reportedAt, - CreatedAt: input.Request.Event.OccurredAt, - Logs: input.Request.Event.Logs, - CustomInfo: input.Request.Event.CustomInfo, - TaskType: input.Request.Event.TaskType, + CreatedAt: input.Request.GetEvent().GetOccurredAt(), + Logs: input.Request.GetEvent().GetLogs(), + CustomInfo: input.Request.GetEvent().GetCustomInfo(), + TaskType: input.Request.GetEvent().GetTaskType(), Metadata: metadata, - EventVersion: input.Request.Event.EventVersion, + EventVersion: input.Request.GetEvent().GetEventVersion(), } - if len(input.Request.Event.Reasons) > 0 { - for _, reason := range input.Request.Event.Reasons { - closure.Reasons = append(closure.Reasons, &admin.Reason{ - OccurredAt: reason.OccurredAt, - Message: reason.Reason, + if len(input.Request.GetEvent().GetReasons()) > 0 { + for _, reason := range input.Request.GetEvent().GetReasons() { + closure.Reasons = append(closure.GetReasons(), &admin.Reason{ + OccurredAt: reason.GetOccurredAt(), + Message: reason.GetReason(), }) } - closure.Reason = input.Request.Event.Reasons[len(input.Request.Event.Reasons)-1].Reason - } else if len(input.Request.Event.Reason) > 0 { + closure.Reason = input.Request.GetEvent().GetReasons()[len(input.Request.GetEvent().GetReasons())-1].GetReason() + } else if len(input.Request.GetEvent().GetReason()) > 0 { closure.Reasons = []*admin.Reason{ { - OccurredAt: input.Request.Event.OccurredAt, - Message: input.Request.Event.Reason, + OccurredAt: 
input.Request.GetEvent().GetOccurredAt(), + Message: input.Request.GetEvent().GetReason(), }, } - closure.Reason = input.Request.Event.Reason + closure.Reason = input.Request.GetEvent().GetReason() } - eventPhase := input.Request.Event.Phase + eventPhase := input.Request.GetEvent().GetPhase() // Different tasks may report different phases as their first event. // If the first event we receive for this execution is a valid @@ -188,7 +188,7 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode } } - if common.IsTaskExecutionTerminal(input.Request.Event.Phase) { + if common.IsTaskExecutionTerminal(input.Request.GetEvent().GetPhase()) { err := addTaskTerminalState(ctx, input.Request, taskExecution, closure, input.InlineEventDataPolicy, input.StorageClient) if err != nil { return nil, err @@ -201,7 +201,7 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode } taskExecution.Closure = marshaledClosure - taskExecutionCreatedAt, err := ptypes.Timestamp(input.Request.Event.OccurredAt) + taskExecutionCreatedAt, err := ptypes.Timestamp(input.Request.GetEvent().GetOccurredAt()) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event timestamp") } @@ -232,17 +232,17 @@ func mergeLogs(existing, latest []*core.TaskLog) []*core.TaskLog { latestSetByURI := make(map[string]*core.TaskLog, len(latest)) latestSetByName := make(map[string]*core.TaskLog, len(latest)) for _, latestLog := range latest { - latestSetByURI[latestLog.Uri] = latestLog - if len(latestLog.Name) > 0 { - latestSetByName[latestLog.Name] = latestLog + latestSetByURI[latestLog.GetUri()] = latestLog + if len(latestLog.GetName()) > 0 { + latestSetByName[latestLog.GetName()] = latestLog } } // Copy over the latest logs since names will change for existing logs as a task transitions across phases. logs := latest for _, existingLog := range existing { - if _, ok := latestSetByURI[existingLog.Uri]; !ok { - if _, ok = latestSetByName[existingLog.Name]; !ok { + if _, ok := latestSetByURI[existingLog.GetUri()]; !ok { + if _, ok = latestSetByName[existingLog.GetName()]; !ok { // We haven't seen this log before: add it to the output result list. logs = append(logs, existingLog) } @@ -299,16 +299,16 @@ func mergeExternalResource(existing, latest *event.ExternalResourceInfo) *event. return existing } - if latest.ExternalId != "" && existing.ExternalId != latest.ExternalId { - existing.ExternalId = latest.ExternalId + if latest.GetExternalId() != "" && existing.GetExternalId() != latest.GetExternalId() { + existing.ExternalId = latest.GetExternalId() } // note we are not updating existing.Index and existing.RetryAttempt because they are the // search key for our ExternalResource pool. 
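The comment in the hunk above notes that Index and RetryAttempt are the lookup key for an external resource, so the merge never rewrites them. A simplified sketch of that merge rule, with a stand-in struct in place of event.ExternalResourceInfo (the CacheStatus and Logs handling of the real function is omitted here):

package main

import "fmt"

// Stand-in for event.ExternalResourceInfo, reduced to the fields the merge
// rule needs.
type ExternalResourceInfo struct {
	Index        uint32
	RetryAttempt uint32
	ExternalId   string
	Phase        int32
}

// mergeExternalResource folds the newer update into the stored record.
// Index and RetryAttempt are never overwritten: together they are the key
// used to find the matching entry, so changing them would orphan the record.
func mergeExternalResource(existing, latest *ExternalResourceInfo) *ExternalResourceInfo {
	if existing == nil {
		return latest
	}
	if latest == nil {
		return existing
	}
	if latest.ExternalId != "" && existing.ExternalId != latest.ExternalId {
		existing.ExternalId = latest.ExternalId
	}
	existing.Phase = latest.Phase
	return existing
}

func main() {
	existing := &ExternalResourceInfo{Index: 2, RetryAttempt: 0, ExternalId: "pod-a", Phase: 1}
	latest := &ExternalResourceInfo{Index: 2, RetryAttempt: 0, Phase: 3} // no ExternalId in this event
	fmt.Printf("%+v\n", *mergeExternalResource(existing, latest))
}

Only fields that carry a meaningful value in the newer event overwrite the stored record, so later events keep matching the same entry by (Index, RetryAttempt).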
- existing.Phase = latest.Phase - if latest.CacheStatus != core.CatalogCacheStatus_CACHE_DISABLED && existing.CacheStatus != latest.CacheStatus { - existing.CacheStatus = latest.CacheStatus + existing.Phase = latest.GetPhase() + if latest.GetCacheStatus() != core.CatalogCacheStatus_CACHE_DISABLED && existing.GetCacheStatus() != latest.GetCacheStatus() { + existing.CacheStatus = latest.GetCacheStatus() } - existing.Logs = mergeLogs(existing.Logs, latest.Logs) + existing.Logs = mergeLogs(existing.GetLogs(), latest.GetLogs()) return existing } @@ -357,16 +357,16 @@ func mergeMetadata(existing, latest *event.TaskExecutionMetadata) *event.TaskExe return existing } - if latest.GeneratedName != "" && existing.GeneratedName != latest.GeneratedName { - existing.GeneratedName = latest.GeneratedName + if latest.GetGeneratedName() != "" && existing.GetGeneratedName() != latest.GetGeneratedName() { + existing.GeneratedName = latest.GetGeneratedName() } - existing.ExternalResources = mergeExternalResources(existing.ExternalResources, latest.ExternalResources) - existing.ResourcePoolInfo = latest.ResourcePoolInfo - if latest.PluginIdentifier != "" && existing.PluginIdentifier != latest.PluginIdentifier { - existing.PluginIdentifier = latest.PluginIdentifier + existing.ExternalResources = mergeExternalResources(existing.GetExternalResources(), latest.GetExternalResources()) + existing.ResourcePoolInfo = latest.GetResourcePoolInfo() + if latest.GetPluginIdentifier() != "" && existing.GetPluginIdentifier() != latest.GetPluginIdentifier() { + existing.PluginIdentifier = latest.GetPluginIdentifier() } - if latest.InstanceClass != event.TaskExecutionMetadata_DEFAULT && existing.InstanceClass != latest.InstanceClass { - existing.InstanceClass = latest.InstanceClass + if latest.GetInstanceClass() != event.TaskExecutionMetadata_DEFAULT && existing.GetInstanceClass() != latest.GetInstanceClass() { + existing.InstanceClass = latest.GetInstanceClass() } return existing @@ -374,7 +374,7 @@ func mergeMetadata(existing, latest *event.TaskExecutionMetadata) *event.TaskExe func filterExternalResourceLogsByPhase(externalResources []*event.ExternalResourceInfo, phase core.TaskExecution_Phase) { for _, externalResource := range externalResources { - externalResource.Logs = filterLogsByPhase(externalResource.Logs, phase) + externalResource.Logs = filterLogsByPhase(externalResource.GetLogs(), phase) } } @@ -382,13 +382,13 @@ func filterLogsByPhase(logs []*core.TaskLog, phase core.TaskExecution_Phase) []* filteredLogs := make([]*core.TaskLog, 0, len(logs)) for _, l := range logs { - if common.IsTaskExecutionTerminal(phase) && l.HideOnceFinished { + if common.IsTaskExecutionTerminal(phase) && l.GetHideOnceFinished() { continue } // Some plugins like e.g. Dask, Ray start with or very quickly transition to core.TaskExecution_INITIALIZING // once the CR has been created even though the underlying pods are still pending. We thus treat queued and // initializing the same here. 
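filterLogsByPhase, touched in the hunk below, encodes two display rules: links marked HideOnceFinished are dropped once the task is terminal, and while the task is queued or initializing (deliberately treated the same, per the comment about Dask and Ray) only links marked ShowWhilePending are kept. A runnable sketch with simplified stand-ins for core.TaskLog and the phase enum:

package main

import "fmt"

// Hypothetical, simplified phase and log types standing in for
// core.TaskExecution_Phase and core.TaskLog.
type Phase int

const (
	PhaseQueued Phase = iota
	PhaseInitializing
	PhaseRunning
	PhaseSucceeded
)

func isTerminal(p Phase) bool { return p == PhaseSucceeded }

type TaskLog struct {
	Uri              string
	ShowWhilePending bool
	HideOnceFinished bool
}

// filterLogsByPhase drops links that should not be shown for the current
// phase: HideOnceFinished links disappear once the task is terminal, and
// while the task is still queued or initializing only ShowWhilePending
// links are kept.
func filterLogsByPhase(logs []*TaskLog, phase Phase) []*TaskLog {
	filtered := make([]*TaskLog, 0, len(logs))
	for _, l := range logs {
		if isTerminal(phase) && l.HideOnceFinished {
			continue
		}
		if (phase == PhaseQueued || phase == PhaseInitializing) && !l.ShowWhilePending {
			continue
		}
		filtered = append(filtered, l)
	}
	return filtered
}

func main() {
	logs := []*TaskLog{
		{Uri: "uri-default"},
		{Uri: "uri-show-pending", ShowWhilePending: true},
		{Uri: "uri-hide-finished", HideOnceFinished: true},
	}
	for _, l := range filterLogsByPhase(logs, PhaseQueued) {
		fmt.Println(l.Uri) // only uri-show-pending survives while pending
	}
}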
- if (phase == core.TaskExecution_QUEUED || phase == core.TaskExecution_INITIALIZING) && !l.ShowWhilePending { + if (phase == core.TaskExecution_QUEUED || phase == core.TaskExecution_INITIALIZING) && !l.GetShowWhilePending() { continue } filteredLogs = append(filteredLogs, l) @@ -409,45 +409,45 @@ func UpdateTaskExecutionModel(ctx context.Context, request *admin.TaskExecutionE return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal task execution closure with error: %+v", err) } - isPhaseChange := taskExecutionModel.Phase != request.Event.Phase.String() + isPhaseChange := taskExecutionModel.Phase != request.GetEvent().GetPhase().String() existingTaskPhase := taskExecutionModel.Phase - taskExecutionModel.Phase = request.Event.Phase.String() - taskExecutionModel.PhaseVersion = request.Event.PhaseVersion - taskExecutionClosure.Phase = request.Event.Phase - reportedAt := request.Event.ReportedAt - if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) { - reportedAt = request.Event.OccurredAt + taskExecutionModel.Phase = request.GetEvent().GetPhase().String() + taskExecutionModel.PhaseVersion = request.GetEvent().GetPhaseVersion() + taskExecutionClosure.Phase = request.GetEvent().GetPhase() + reportedAt := request.GetEvent().GetReportedAt() + if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) { + reportedAt = request.GetEvent().GetOccurredAt() } taskExecutionClosure.UpdatedAt = reportedAt - mergedLogs := mergeLogs(taskExecutionClosure.Logs, request.Event.Logs) - filteredLogs := filterLogsByPhase(mergedLogs, request.Event.Phase) + mergedLogs := mergeLogs(taskExecutionClosure.GetLogs(), request.GetEvent().GetLogs()) + filteredLogs := filterLogsByPhase(mergedLogs, request.GetEvent().GetPhase()) taskExecutionClosure.Logs = filteredLogs - if len(request.Event.Reasons) > 0 { - for _, reason := range request.Event.Reasons { + if len(request.GetEvent().GetReasons()) > 0 { + for _, reason := range request.GetEvent().GetReasons() { taskExecutionClosure.Reasons = append( - taskExecutionClosure.Reasons, + taskExecutionClosure.GetReasons(), &admin.Reason{ - OccurredAt: reason.OccurredAt, - Message: reason.Reason, + OccurredAt: reason.GetOccurredAt(), + Message: reason.GetReason(), }) } - taskExecutionClosure.Reason = request.Event.Reasons[len(request.Event.Reasons)-1].Reason - } else if len(request.Event.Reason) > 0 { - if taskExecutionClosure.Reason != request.Event.Reason { + taskExecutionClosure.Reason = request.GetEvent().GetReasons()[len(request.GetEvent().GetReasons())-1].GetReason() + } else if len(request.GetEvent().GetReason()) > 0 { + if taskExecutionClosure.GetReason() != request.GetEvent().GetReason() { // by tracking a time-series of reasons we increase the size of the TaskExecutionClosure in scenarios where // a task reports a large number of unique reasons. if this size increase becomes problematic we this logic // will need to be revisited. 
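The reason-tracking comment in the hunk below describes a guard against unbounded growth: when an event carries a single Reason string, a new entry is appended to the Reasons time-series only if the message differs from the one already recorded, while closure.Reason always reflects the latest message. A compact sketch of that bookkeeping, using plain Go types in place of the admin.Reason and timestamp protos:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-in for the closure's reason bookkeeping.
type Reason struct {
	OccurredAt time.Time
	Message    string
}

type Closure struct {
	Reason  string   // most recent reason, kept for backward compatibility
	Reasons []Reason // time-series of distinct reasons
}

// recordReason appends to the time-series only when the message actually
// changed, which bounds growth when a task keeps repeating the same reason,
// and always keeps Reason pointing at the latest message.
func (c *Closure) recordReason(occurredAt time.Time, message string) {
	if message == "" {
		return
	}
	if c.Reason != message {
		c.Reasons = append(c.Reasons, Reason{OccurredAt: occurredAt, Message: message})
	}
	c.Reason = message
}

func main() {
	var c Closure
	now := time.Now()
	c.recordReason(now, "waiting for resources")
	c.recordReason(now.Add(time.Second), "waiting for resources") // deduplicated
	c.recordReason(now.Add(2*time.Second), "pulling image")
	fmt.Println(len(c.Reasons), c.Reason) // 2 pulling image
}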
taskExecutionClosure.Reasons = append( - taskExecutionClosure.Reasons, + taskExecutionClosure.GetReasons(), &admin.Reason{ - OccurredAt: request.Event.OccurredAt, - Message: request.Event.Reason, + OccurredAt: request.GetEvent().GetOccurredAt(), + Message: request.GetEvent().GetReason(), }) } - taskExecutionClosure.Reason = request.Event.Reason + taskExecutionClosure.Reason = request.GetEvent().GetReason() } if existingTaskPhase != core.TaskExecution_RUNNING.String() && taskExecutionModel.Phase == core.TaskExecution_RUNNING.String() { err = addTaskStartedState(request, taskExecutionModel, &taskExecutionClosure) @@ -456,24 +456,24 @@ func UpdateTaskExecutionModel(ctx context.Context, request *admin.TaskExecutionE } } - if common.IsTaskExecutionTerminal(request.Event.Phase) { + if common.IsTaskExecutionTerminal(request.GetEvent().GetPhase()) { err := addTaskTerminalState(ctx, request, taskExecutionModel, &taskExecutionClosure, inlineEventDataPolicy, storageClient) if err != nil { return err } } - taskExecutionClosure.CustomInfo, err = mergeCustom(taskExecutionClosure.CustomInfo, request.Event.CustomInfo) + taskExecutionClosure.CustomInfo, err = mergeCustom(taskExecutionClosure.GetCustomInfo(), request.GetEvent().GetCustomInfo()) if err != nil { return errors.NewFlyteAdminErrorf(codes.Internal, "failed to merge task event custom_info with error: %v", err) } - taskExecutionClosure.Metadata = mergeMetadata(taskExecutionClosure.Metadata, request.Event.Metadata) + taskExecutionClosure.Metadata = mergeMetadata(taskExecutionClosure.GetMetadata(), request.GetEvent().GetMetadata()) - if isPhaseChange && taskExecutionClosure.Metadata != nil && len(taskExecutionClosure.Metadata.ExternalResources) > 0 { - filterExternalResourceLogsByPhase(taskExecutionClosure.Metadata.ExternalResources, request.Event.Phase) + if isPhaseChange && taskExecutionClosure.GetMetadata() != nil && len(taskExecutionClosure.GetMetadata().GetExternalResources()) > 0 { + filterExternalResourceLogsByPhase(taskExecutionClosure.GetMetadata().GetExternalResources(), request.GetEvent().GetPhase()) } - if request.Event.EventVersion > taskExecutionClosure.EventVersion { - taskExecutionClosure.EventVersion = request.Event.EventVersion + if request.GetEvent().GetEventVersion() > taskExecutionClosure.GetEventVersion() { + taskExecutionClosure.EventVersion = request.GetEvent().GetEventVersion() } marshaledClosure, err := proto.Marshal(&taskExecutionClosure) if err != nil { @@ -495,7 +495,7 @@ func FromTaskExecutionModel(taskExecutionModel models.TaskExecution, opts *Execu if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure") } - if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 { + if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().GetMessage()) > 0 { trimmedErrOutputResult := closure.GetError() trimmedErrMessage := TrimErrorMessage(trimmedErrOutputResult.GetMessage()) trimmedErrOutputResult.Message = trimmedErrMessage @@ -551,15 +551,15 @@ func handleTaskExecutionInputs(ctx context.Context, taskExecutionModel *models.T // Inputs are static over the duration of the task execution, no need to update them when they're already set return nil } - switch request.Event.GetInputValue().(type) { + switch request.GetEvent().GetInputValue().(type) { case *event.TaskExecutionEvent_InputUri: taskExecutionModel.InputURI = request.GetEvent().GetInputUri() case *event.TaskExecutionEvent_InputData: uri, err := 
common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetInputData(), - request.Event.ParentNodeExecutionId.ExecutionId.Project, request.Event.ParentNodeExecutionId.ExecutionId.Domain, - request.Event.ParentNodeExecutionId.ExecutionId.Name, request.Event.ParentNodeExecutionId.NodeId, - request.Event.TaskId.Project, request.Event.TaskId.Domain, request.Event.TaskId.Name, request.Event.TaskId.Version, - strconv.FormatUint(uint64(request.Event.RetryAttempt), 10), InputsObjectSuffix) + request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetProject(), request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetDomain(), + request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetName(), request.GetEvent().GetParentNodeExecutionId().GetNodeId(), + request.GetEvent().GetTaskId().GetProject(), request.GetEvent().GetTaskId().GetDomain(), request.GetEvent().GetTaskId().GetName(), request.GetEvent().GetTaskId().GetVersion(), + strconv.FormatUint(uint64(request.GetEvent().GetRetryAttempt()), 10), InputsObjectSuffix) if err != nil { return err } diff --git a/flyteadmin/pkg/repositories/transformers/task_execution_test.go b/flyteadmin/pkg/repositories/transformers/task_execution_test.go index 5fc5430192..461ab39c8c 100644 --- a/flyteadmin/pkg/repositories/transformers/task_execution_test.go +++ b/flyteadmin/pkg/repositories/transformers/task_execution_test.go @@ -85,7 +85,7 @@ func TestAddTaskStartedState(t *testing.T) { err := addTaskStartedState(&request, &taskExecutionModel, closure) assert.Nil(t, err) - timestamp, err := ptypes.Timestamp(closure.StartedAt) + timestamp, err := ptypes.Timestamp(closure.GetStartedAt()) assert.Nil(t, err) assert.Equal(t, startedAt, timestamp) assert.Equal(t, &startedAt, taskExecutionModel.StartedAt) @@ -109,7 +109,7 @@ func TestAddTaskStartedState(t *testing.T) { err := addTaskStartedState(&request, &taskExecutionModel, closure) assert.Nil(t, err) - timestamp, err := ptypes.Timestamp(closure.StartedAt) + timestamp, err := ptypes.Timestamp(closure.GetStartedAt()) assert.Nil(t, err) assert.NotEqual(t, oldStartedAt, timestamp) assert.Equal(t, &oldStartedAt, taskExecutionModel.StartedAt) @@ -169,7 +169,7 @@ func TestAddTaskTerminalState_OutputURI(t *testing.T) { duration, err := ptypes.Duration(closure.GetDuration()) assert.Nil(t, err) - assert.EqualValues(t, request.Event.OutputResult, closure.OutputResult) + assert.EqualValues(t, request.GetEvent().GetOutputResult(), closure.GetOutputResult()) assert.EqualValues(t, outputURI, closure.GetOutputUri()) assert.EqualValues(t, time.Minute, duration) @@ -232,7 +232,7 @@ func TestAddTaskTerminalState_OutputData(t *testing.T) { duration, err := ptypes.Duration(closure.GetDuration()) assert.Nil(t, err) - assert.EqualValues(t, request.Event.OutputResult, closure.OutputResult) + assert.EqualValues(t, request.GetEvent().GetOutputResult(), closure.GetOutputResult()) assert.True(t, proto.Equal(outputData, closure.GetOutputData())) assert.EqualValues(t, time.Minute, duration) }) @@ -296,17 +296,17 @@ func TestCreateTaskExecutionModelQueued(t *testing.T) { assert.Equal(t, &models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, 
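handleTaskExecutionInputs, and the terminal-state output handling earlier in this file, choose between two storage strategies: keep the URI the event already references, or offload inline data to blob storage and record the resulting URI. A small sketch of that decision with hypothetical types (InputURI, InputData, BlobStore and memStore are inventions for illustration; the real code works with core.LiteralMap, common.OffloadLiteralMap and the flytestdlib storage client):

package main

import (
	"context"
	"fmt"
)

// Hypothetical, simplified stand-ins for the event payload and storage client.
type InputValue interface{ isInputValue() }

type InputURI string

func (InputURI) isInputValue() {}

type InputData map[string]string

func (InputData) isInputValue() {}

type BlobStore interface {
	Write(ctx context.Context, key string, data map[string]string) (uri string, err error)
}

// resolveInputURI mirrors the shape of the switch above: if the event already
// carries a URI it is recorded as-is, otherwise the inline payload is
// offloaded and the resulting URI is recorded instead.
func resolveInputURI(ctx context.Context, store BlobStore, key string, value InputValue) (string, error) {
	switch v := value.(type) {
	case InputURI:
		return string(v), nil
	case InputData:
		return store.Write(ctx, key, v)
	default:
		return "", nil // no inputs reported
	}
}

// memStore is a toy in-memory store used only to make the sketch runnable.
type memStore struct{ blobs map[string]map[string]string }

func (m *memStore) Write(ctx context.Context, key string, data map[string]string) (string, error) {
	m.blobs[key] = data
	return "mem://" + key, nil
}

func main() {
	store := &memStore{blobs: map[string]map[string]string{}}
	uri, _ := resolveInputURI(context.Background(), store, "proj/domain/exec/n0/0/inputs", InputData{"x": "1"})
	fmt.Println(uri) // mem://proj/domain/exec/n0/0/inputs
}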
+ NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -370,7 +370,7 @@ func TestCreateTaskExecutionModelRunning(t *testing.T) { CustomInfo: &customInfo, } - t.Logf("expected %+v %+v\n", expectedClosure.Reason, expectedClosure.Reasons) + t.Logf("expected %+v %+v\n", expectedClosure.GetReason(), expectedClosure.GetReasons()) expectedClosureBytes, err := proto.Marshal(expectedClosure) assert.Nil(t, err) @@ -378,17 +378,17 @@ func TestCreateTaskExecutionModelRunning(t *testing.T) { assert.Equal(t, &models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -522,17 +522,17 @@ func TestUpdateTaskExecutionModelRunningToFailed(t *testing.T) { existingTaskExecution := models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -627,17 +627,17 @@ func TestUpdateTaskExecutionModelRunningToFailed(t *testing.T) { assert.EqualValues(t, models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: 
sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -675,17 +675,17 @@ func TestUpdateTaskExecutionModelFilterLogLinks(t *testing.T) { existingTaskExecution := models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -729,7 +729,7 @@ func TestUpdateTaskExecutionModelFilterLogLinks(t *testing.T) { err = proto.Unmarshal(existingTaskExecution.Closure, updatedClosure) assert.Nil(t, err) - assert.Equal(t, updatedClosure.Logs, []*core.TaskLog{ + assert.Equal(t, updatedClosure.GetLogs(), []*core.TaskLog{ { Uri: "uri-show-pending", ShowWhilePending: true, @@ -776,17 +776,17 @@ func TestUpdateTaskExecutionModelFilterLogLinksArray(t *testing.T) { existingTaskExecution := models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -821,7 +821,7 @@ func TestUpdateTaskExecutionModelFilterLogLinksArray(t *testing.T) { err = proto.Unmarshal(existingTaskExecution.Closure, updatedClosure) assert.Nil(t, err) - assert.Equal(t, updatedClosure.Metadata.ExternalResources[0].Logs, []*core.TaskLog{ + assert.Equal(t, updatedClosure.GetMetadata().GetExternalResources()[0].GetLogs(), []*core.TaskLog{ { Uri: "uri-default", }, @@ -851,17 +851,17 @@ func TestUpdateTaskExecutionModelSingleEvents(t *testing.T) { existingTaskExecution := models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: 
sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -943,17 +943,17 @@ func TestUpdateTaskExecutionModelBatchedEvents(t *testing.T) { existingTaskExecution := models.TaskExecution{ TaskExecutionKey: models.TaskExecutionKey{ TaskKey: models.TaskKey{ - Project: sampleTaskID.Project, - Domain: sampleTaskID.Domain, - Name: sampleTaskID.Name, - Version: sampleTaskID.Version, + Project: sampleTaskID.GetProject(), + Domain: sampleTaskID.GetDomain(), + Name: sampleTaskID.GetName(), + Version: sampleTaskID.GetVersion(), }, NodeExecutionKey: models.NodeExecutionKey{ - NodeID: sampleNodeExecID.NodeId, + NodeID: sampleNodeExecID.GetNodeId(), ExecutionKey: models.ExecutionKey{ - Project: sampleNodeExecID.ExecutionId.Project, - Domain: sampleNodeExecID.ExecutionId.Domain, - Name: sampleNodeExecID.ExecutionId.Name, + Project: sampleNodeExecID.GetExecutionId().GetProject(), + Domain: sampleNodeExecID.GetExecutionId().GetDomain(), + Name: sampleNodeExecID.GetExecutionId().GetName(), }, }, RetryAttempt: &retryAttemptValue, @@ -1130,7 +1130,7 @@ func TestFromTaskExecutionModel_Error(t *testing.T) { expectedExecErr := execErr expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen)) assert.Nil(t, err) - assert.True(t, proto.Equal(expectedExecErr, taskExecution.Closure.GetError())) + assert.True(t, proto.Equal(expectedExecErr, taskExecution.GetClosure().GetError())) extraShortErrMsg := string(make([]byte, 10)) execErr = &core.ExecutionError{ @@ -1149,7 +1149,7 @@ func TestFromTaskExecutionModel_Error(t *testing.T) { expectedExecErr = execErr expectedExecErr.Message = string(make([]byte, 10)) assert.Nil(t, err) - assert.True(t, proto.Equal(expectedExecErr, taskExecution.Closure.GetError())) + assert.True(t, proto.Equal(expectedExecErr, taskExecution.GetClosure().GetError())) } func TestFromTaskExecutionModels(t *testing.T) { diff --git a/flyteadmin/pkg/repositories/transformers/task_test.go b/flyteadmin/pkg/repositories/transformers/task_test.go index 1f01f4b720..8fac3863d7 100644 --- a/flyteadmin/pkg/repositories/transformers/task_test.go +++ b/flyteadmin/pkg/repositories/transformers/task_test.go @@ -52,10 +52,10 @@ func TestFromTaskModel(t *testing.T) { Domain: "domain", Name: "name", Version: "version", - }, task.Id)) + }, task.GetId())) expectedClosure := testutils.GetTaskClosure() expectedClosure.CreatedAt = createdAtProto - assert.True(t, proto.Equal(expectedClosure, task.Closure)) + assert.True(t, proto.Equal(expectedClosure, task.GetClosure())) } func TestFromTaskModels(t *testing.T) { @@ -100,10 +100,10 @@ func TestFromTaskModels(t *testing.T) { Domain: "domain a", Name: "name a", Version: "version a", - }, taskList[0].Id)) + }, taskList[0].GetId())) expectedClosure := testutils.GetTaskClosure() expectedClosure.CreatedAt = createdAtAProto - assert.True(t, proto.Equal(expectedClosure, taskList[0].Closure)) + assert.True(t, proto.Equal(expectedClosure, taskList[0].GetClosure())) assert.True(t, proto.Equal(&core.Identifier{ ResourceType: core.ResourceType_TASK, @@ -111,11 +111,11 @@ func TestFromTaskModels(t *testing.T) { Domain: "domain b", Name: "name b", Version: "version b", - }, taskList[1].Id)) + }, taskList[1].GetId())) expectedClosure = &admin.TaskClosure{ CreatedAt: createdAtBProto, } - assert.True(t, proto.Equal(expectedClosure, taskList[1].Closure)) + assert.True(t, 
proto.Equal(expectedClosure, taskList[1].GetClosure())) } func TestFromTaskModelsToIdentifiers(t *testing.T) { @@ -139,10 +139,10 @@ func TestFromTaskModelsToIdentifiers(t *testing.T) { } taskIds := FromTaskModelsToIdentifiers(taskModels) - assert.Equal(t, "domain a", taskIds[0].Domain) - assert.Equal(t, "project a", taskIds[0].Project) - assert.Equal(t, "name a", taskIds[0].Name) - assert.Equal(t, "domain b", taskIds[1].Domain) - assert.Equal(t, "project b", taskIds[1].Project) - assert.Equal(t, "name b", taskIds[1].Name) + assert.Equal(t, "domain a", taskIds[0].GetDomain()) + assert.Equal(t, "project a", taskIds[0].GetProject()) + assert.Equal(t, "name a", taskIds[0].GetName()) + assert.Equal(t, "domain b", taskIds[1].GetDomain()) + assert.Equal(t, "project b", taskIds[1].GetProject()) + assert.Equal(t, "name b", taskIds[1].GetName()) } diff --git a/flyteadmin/pkg/repositories/transformers/workflow.go b/flyteadmin/pkg/repositories/transformers/workflow.go index a796987955..0dbf7196a8 100644 --- a/flyteadmin/pkg/repositories/transformers/workflow.go +++ b/flyteadmin/pkg/repositories/transformers/workflow.go @@ -17,8 +17,8 @@ import ( func CreateWorkflowModel(request *admin.WorkflowCreateRequest, remoteClosureIdentifier string, digest []byte) (models.Workflow, error) { var typedInterface []byte - if request.Spec != nil && request.Spec.Template != nil && request.Spec.Template.Interface != nil { - serializedTypedInterface, err := proto.Marshal(request.Spec.Template.Interface) + if request.GetSpec() != nil && request.GetSpec().GetTemplate() != nil && request.GetSpec().GetTemplate().GetInterface() != nil { + serializedTypedInterface, err := proto.Marshal(request.GetSpec().GetTemplate().GetInterface()) if err != nil { return models.Workflow{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize workflow spec") } @@ -26,10 +26,10 @@ func CreateWorkflowModel(request *admin.WorkflowCreateRequest, remoteClosureIden } return models.Workflow{ WorkflowKey: models.WorkflowKey{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Id.Name, - Version: request.Id.Version, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetId().GetName(), + Version: request.GetId().GetVersion(), }, TypedInterface: typedInterface, RemoteClosureIdentifier: remoteClosureIdentifier, @@ -54,7 +54,7 @@ func FromWorkflowModel(workflowModel models.Workflow) (admin.Workflow, error) { if len(workflowModel.TypedInterface) > 0 { err = proto.Unmarshal(workflowModel.TypedInterface, &workflowInterface) if err != nil { - return admin.Workflow{}, errors.NewFlyteAdminErrorf(codes.Internal, fmt.Sprintf("failed to unmarshal workflow %v interface. Error message: %v", workflowModel.ID, err.Error())) + return admin.Workflow{}, errors.NewFlyteAdminErrorf(codes.Internal, fmt.Sprintf("failed to unmarshal workflow %v interface. 
Error message: %v", workflowModel.ID, err.Error())) //nolint } } diff --git a/flyteadmin/pkg/repositories/transformers/workflow_test.go b/flyteadmin/pkg/repositories/transformers/workflow_test.go index 0f29aaa64e..95a698075d 100644 --- a/flyteadmin/pkg/repositories/transformers/workflow_test.go +++ b/flyteadmin/pkg/repositories/transformers/workflow_test.go @@ -70,7 +70,7 @@ func TestFromWorkflowModel(t *testing.T) { Domain: "domain", Name: "name", Version: "version", - }, workflow.Id)) + }, workflow.GetId())) var workflowInterface core.TypedInterface err = proto.Unmarshal(workflowModel.TypedInterface, &workflowInterface) @@ -85,7 +85,7 @@ func TestFromWorkflowModel(t *testing.T) { }, }, }, - }, workflow.Closure)) + }, workflow.GetClosure())) } func TestFromWorkflowModels(t *testing.T) { @@ -131,7 +131,7 @@ func TestFromWorkflowModels(t *testing.T) { Domain: "domain a", Name: "name a", Version: "version a", - }, workflowList[0].Id)) + }, workflowList[0].GetId())) workflowInterface := testutils.GetWorkflowRequestInterface() assert.NoError(t, err) @@ -145,7 +145,7 @@ func TestFromWorkflowModels(t *testing.T) { }, }, }, - }, workflowList[0].Closure)) + }, workflowList[0].GetClosure())) assert.True(t, proto.Equal(&core.Identifier{ ResourceType: core.ResourceType_WORKFLOW, @@ -153,7 +153,7 @@ func TestFromWorkflowModels(t *testing.T) { Domain: "domain b", Name: "name b", Version: "version b", - }, workflowList[1].Id)) + }, workflowList[1].GetId())) assert.True(t, proto.Equal(&admin.WorkflowClosure{ CreatedAt: createdAtBProto, @@ -164,5 +164,5 @@ func TestFromWorkflowModels(t *testing.T) { }, }, }, - }, workflowList[1].Closure)) + }, workflowList[1].GetClosure())) } diff --git a/flyteadmin/pkg/rpc/adminservice/description_entity.go b/flyteadmin/pkg/rpc/adminservice/description_entity.go index 91e3f0f134..b8bb5f1d06 100644 --- a/flyteadmin/pkg/rpc/adminservice/description_entity.go +++ b/flyteadmin/pkg/rpc/adminservice/description_entity.go @@ -12,7 +12,7 @@ import ( func (m *AdminService) GetDescriptionEntity(ctx context.Context, request *admin.ObjectGetRequest) (*admin.DescriptionEntity, error) { // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it // to the request. - if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED { + if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED { logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) request.Id.ResourceType = core.ResourceType_TASK } diff --git a/flyteadmin/pkg/rpc/adminservice/launch_plan.go b/flyteadmin/pkg/rpc/adminservice/launch_plan.go index f0dabdb76c..1cdf757362 100644 --- a/flyteadmin/pkg/rpc/adminservice/launch_plan.go +++ b/flyteadmin/pkg/rpc/adminservice/launch_plan.go @@ -26,7 +26,7 @@ func (m *AdminService) CreateLaunchPlan( func (m *AdminService) GetLaunchPlan(ctx context.Context, request *admin.ObjectGetRequest) (*admin.LaunchPlan, error) { // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it // to the request. 
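The NOTE repeated in these handlers explains why the resource type is patched in: the REST route makes it implicit, so the gRPC request may arrive with ResourceType unset and must be defaulted before validation and lookup. A minimal sketch of that defaulting step, with simplified stand-ins for core.ResourceType and admin.ObjectGetRequest:

package main

import "fmt"

// Hypothetical enum standing in for core.ResourceType.
type ResourceType int32

const (
	ResourceTypeUnspecified ResourceType = iota
	ResourceTypeTask
	ResourceTypeWorkflow
	ResourceTypeLaunchPlan
)

type Identifier struct {
	ResourceType ResourceType
	Project      string
	Domain       string
	Name         string
}

type ObjectGetRequest struct {
	Id *Identifier
}

// defaultResourceType fills in the resource type when the HTTP route made it
// implicit, so downstream validation sees a fully specified identifier.
func defaultResourceType(request *ObjectGetRequest, fallback ResourceType) {
	if request != nil && request.Id != nil && request.Id.ResourceType == ResourceTypeUnspecified {
		request.Id.ResourceType = fallback
	}
}

func main() {
	req := &ObjectGetRequest{Id: &Identifier{Project: "flytesnacks", Domain: "development", Name: "my_lp"}}
	defaultResourceType(req, ResourceTypeLaunchPlan)
	fmt.Println(req.Id.ResourceType == ResourceTypeLaunchPlan) // true
}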
- if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED { + if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED { logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) request.Id.ResourceType = core.ResourceType_LAUNCH_PLAN } @@ -60,7 +60,7 @@ func (m *AdminService) UpdateLaunchPlan(ctx context.Context, request *admin.Laun *admin.LaunchPlanUpdateResponse, error) { // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it // to the request. - if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED { + if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED { logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) request.Id.ResourceType = core.ResourceType_LAUNCH_PLAN } diff --git a/flyteadmin/pkg/rpc/adminservice/node_execution.go b/flyteadmin/pkg/rpc/adminservice/node_execution.go index e8965edd1d..3f6383a4e4 100644 --- a/flyteadmin/pkg/rpc/adminservice/node_execution.go +++ b/flyteadmin/pkg/rpc/adminservice/node_execution.go @@ -68,8 +68,8 @@ func (m *AdminService) ListNodeExecutionsForTask( ctx context.Context, request *admin.NodeExecutionForTaskListRequest) (*admin.NodeExecutionList, error) { // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it // to the request. - if request.TaskExecutionId != nil && request.TaskExecutionId.TaskId != nil && - request.TaskExecutionId.TaskId.ResourceType == core.ResourceType_UNSPECIFIED { + if request.GetTaskExecutionId() != nil && request.GetTaskExecutionId().GetTaskId() != nil && + request.GetTaskExecutionId().GetTaskId().GetResourceType() == core.ResourceType_UNSPECIFIED { logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) request.TaskExecutionId.TaskId.ResourceType = core.ResourceType_TASK } diff --git a/flyteadmin/pkg/rpc/adminservice/task.go b/flyteadmin/pkg/rpc/adminservice/task.go index 50ed9f8eba..9d4e2883b3 100644 --- a/flyteadmin/pkg/rpc/adminservice/task.go +++ b/flyteadmin/pkg/rpc/adminservice/task.go @@ -27,7 +27,7 @@ func (m *AdminService) CreateTask( func (m *AdminService) GetTask(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Task, error) { // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it // to the request. - if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED { + if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED { logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) request.Id.ResourceType = core.ResourceType_TASK } diff --git a/flyteadmin/pkg/rpc/adminservice/task_execution.go b/flyteadmin/pkg/rpc/adminservice/task_execution.go index d0d8a99d56..3b98fe5057 100644 --- a/flyteadmin/pkg/rpc/adminservice/task_execution.go +++ b/flyteadmin/pkg/rpc/adminservice/task_execution.go @@ -28,11 +28,11 @@ func (m *AdminService) GetTaskExecution( ctx context.Context, request *admin.TaskExecutionGetRequest) (*admin.TaskExecution, error) { // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it // to the request. 
- if request.Id != nil && request.Id.TaskId != nil && request.Id.TaskId.ResourceType == core.ResourceType_UNSPECIFIED { + if request.GetId() != nil && request.GetId().GetTaskId() != nil && request.GetId().GetTaskId().GetResourceType() == core.ResourceType_UNSPECIFIED { logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) request.Id.TaskId.ResourceType = core.ResourceType_TASK } - if err := validation.ValidateTaskExecutionIdentifier(request.Id); err != nil { + if err := validation.ValidateTaskExecutionIdentifier(request.GetId()); err != nil { return nil, err } @@ -70,7 +70,7 @@ func (m *AdminService) GetTaskExecutionData( ctx context.Context, request *admin.TaskExecutionGetDataRequest) (*admin.TaskExecutionGetDataResponse, error) { // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it // to the request. - if request.Id != nil && request.Id.TaskId != nil && request.Id.TaskId.ResourceType == core.ResourceType_UNSPECIFIED { + if request.GetId() != nil && request.GetId().GetTaskId() != nil && request.GetId().GetTaskId().GetResourceType() == core.ResourceType_UNSPECIFIED { logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) request.Id.TaskId.ResourceType = core.ResourceType_TASK } diff --git a/flyteadmin/pkg/rpc/adminservice/tests/execution_test.go b/flyteadmin/pkg/rpc/adminservice/tests/execution_test.go index f541eea806..ef73e60eaa 100644 --- a/flyteadmin/pkg/rpc/adminservice/tests/execution_test.go +++ b/flyteadmin/pkg/rpc/adminservice/tests/execution_test.go @@ -36,9 +36,9 @@ func TestCreateExecutionHappyCase(t *testing.T) { request *admin.ExecutionCreateRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) { return &admin.ExecutionCreateResponse{ Id: &core.WorkflowExecutionIdentifier{ - Project: request.Project, - Domain: request.Domain, - Name: request.Name, + Project: request.GetProject(), + Domain: request.GetDomain(), + Name: request.GetName(), }, }, nil }, @@ -52,7 +52,7 @@ func TestCreateExecutionHappyCase(t *testing.T) { Domain: "Domain", Project: "Project", }) - assert.True(t, proto.Equal(&workflowExecutionIdentifier, resp.Id)) + assert.True(t, proto.Equal(&workflowExecutionIdentifier, resp.GetId())) assert.NoError(t, err) } @@ -64,9 +64,9 @@ func TestCreateExecutionError(t *testing.T) { func(ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) { return nil, repoErrors.GetMissingEntityError("execution", &core.Identifier{ - Project: request.Project, - Domain: request.Domain, - Name: request.Name, + Project: request.GetProject(), + Domain: request.GetDomain(), + Name: request.GetName(), }) }, ) @@ -93,9 +93,9 @@ func TestRelaunchExecutionHappyCase(t *testing.T) { request *admin.ExecutionRelaunchRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) { return &admin.ExecutionCreateResponse{ Id: &core.WorkflowExecutionIdentifier{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Name, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetName(), }, }, nil }, @@ -111,9 +111,9 @@ func TestRelaunchExecutionHappyCase(t *testing.T) { }, Name: "name", }) - assert.Equal(t, "project", resp.Id.Project) - assert.Equal(t, "domain", resp.Id.Domain) - assert.Equal(t, "name", resp.Id.Name) + assert.Equal(t, "project", resp.GetId().GetProject()) + assert.Equal(t, "domain", resp.GetId().GetDomain()) + 
assert.Equal(t, "name", resp.GetId().GetName()) assert.NoError(t, err) } @@ -124,7 +124,7 @@ func TestRelaunchExecutionError(t *testing.T) { mockExecutionManager.SetRelaunchCallback( func(ctx context.Context, request *admin.ExecutionRelaunchRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) { - return nil, repoErrors.GetMissingEntityError("execution", request.Id) + return nil, repoErrors.GetMissingEntityError("execution", request.GetId()) }, ) mockServer := NewMockAdminServer(NewMockAdminServerInput{ @@ -148,9 +148,9 @@ func TestRecoverExecutionHappyCase(t *testing.T) { request *admin.ExecutionRecoverRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) { return &admin.ExecutionCreateResponse{ Id: &core.WorkflowExecutionIdentifier{ - Project: request.Id.Project, - Domain: request.Id.Domain, - Name: request.Name, + Project: request.GetId().GetProject(), + Domain: request.GetId().GetDomain(), + Name: request.GetName(), }, }, nil } @@ -166,9 +166,9 @@ func TestRecoverExecutionHappyCase(t *testing.T) { }, Name: "name", }) - assert.Equal(t, "project", resp.Id.Project) - assert.Equal(t, "domain", resp.Id.Domain) - assert.Equal(t, "name", resp.Id.Name) + assert.Equal(t, "project", resp.GetId().GetProject()) + assert.Equal(t, "domain", resp.GetId().GetDomain()) + assert.Equal(t, "name", resp.GetId().GetName()) assert.NoError(t, err) } @@ -179,7 +179,7 @@ func TestRecoverExecutionError(t *testing.T) { mockExecutionManager.RecoverExecutionFunc = func(ctx context.Context, request *admin.ExecutionRecoverRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) { - return nil, repoErrors.GetMissingEntityError("execution", request.Id) + return nil, repoErrors.GetMissingEntityError("execution", request.GetId()) } mockServer := NewMockAdminServer(NewMockAdminServerInput{ executionManager: &mockExecutionManager, @@ -199,10 +199,10 @@ func TestCreateWorkflowEvent(t *testing.T) { mockExecutionManager.SetCreateEventCallback( func(ctx context.Context, request *admin.WorkflowExecutionEventRequest) ( *admin.WorkflowExecutionEventResponse, error) { - assert.Equal(t, requestID, request.RequestId) - assert.NotNil(t, request.Event) - assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.Event.ExecutionId)) - assert.Equal(t, phase, request.Event.Phase) + assert.Equal(t, requestID, request.GetRequestId()) + assert.NotNil(t, request.GetEvent()) + assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.GetEvent().GetExecutionId())) + assert.Equal(t, phase, request.GetEvent().GetPhase()) return &admin.WorkflowExecutionEventResponse{}, nil }) mockServer := NewMockAdminServer(NewMockAdminServerInput{ @@ -248,7 +248,7 @@ func TestGetExecution(t *testing.T) { mockExecutionManager.SetGetCallback( func(ctx context.Context, request *admin.WorkflowExecutionGetRequest) (*admin.Execution, error) { - assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.Id)) + assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.GetId())) return response, nil }, ) @@ -288,7 +288,7 @@ func TestUpdateExecution(t *testing.T) { mockExecutionManager.SetUpdateExecutionCallback( func(ctx context.Context, request *admin.ExecutionUpdateRequest, requestedAt time.Time) (*admin.ExecutionUpdateResponse, error) { - assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.Id)) + assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.GetId())) return response, nil }, ) @@ -326,9 +326,9 @@ func TestListExecutions(t *testing.T) { mockExecutionManager := 
mocks.MockExecutionManager{} mockExecutionManager.SetListCallback(func(ctx context.Context, request *admin.ResourceListRequest) ( *admin.ExecutionList, error) { - assert.Equal(t, "project", request.Id.Project) - assert.Equal(t, "domain", request.Id.Domain) - assert.Equal(t, uint32(1), request.Limit) + assert.Equal(t, "project", request.GetId().GetProject()) + assert.Equal(t, "domain", request.GetId().GetDomain()) + assert.Equal(t, uint32(1), request.GetLimit()) return &admin.ExecutionList{ Executions: []*admin.Execution{ { @@ -350,7 +350,7 @@ func TestListExecutions(t *testing.T) { Limit: 1, }) assert.NoError(t, err) - assert.Len(t, response.Executions, 1) + assert.Len(t, response.GetExecutions(), 1) } func TestListExecutionsError(t *testing.T) { @@ -386,8 +386,8 @@ func TestTerminateExecution(t *testing.T) { abortCause := "abort cause" mockExecutionManager.SetTerminateExecutionCallback(func( ctx context.Context, request *admin.ExecutionTerminateRequest) (*admin.ExecutionTerminateResponse, error) { - assert.True(t, proto.Equal(&identifier, request.Id)) - assert.Equal(t, abortCause, request.Cause) + assert.True(t, proto.Equal(&identifier, request.GetId())) + assert.Equal(t, abortCause, request.GetCause()) return &admin.ExecutionTerminateResponse{}, nil }) mockServer := NewMockAdminServer(NewMockAdminServerInput{ diff --git a/flyteadmin/pkg/rpc/adminservice/tests/launch_plan_test.go b/flyteadmin/pkg/rpc/adminservice/tests/launch_plan_test.go index 00cd10e04f..4fabdbb9c0 100644 --- a/flyteadmin/pkg/rpc/adminservice/tests/launch_plan_test.go +++ b/flyteadmin/pkg/rpc/adminservice/tests/launch_plan_test.go @@ -47,7 +47,7 @@ func TestCreateLaunchPlanError(t *testing.T) { mockLaunchPlanManager.SetCreateCallback( func(ctx context.Context, request *admin.LaunchPlanCreateRequest) (*admin.LaunchPlanCreateResponse, error) { - return nil, errors.GetMissingEntityError(core.ResourceType_LAUNCH_PLAN.String(), request.Id) + return nil, errors.GetMissingEntityError(core.ResourceType_LAUNCH_PLAN.String(), request.GetId()) }, ) mockServer := NewMockAdminServer(NewMockAdminServerInput{ diff --git a/flyteadmin/pkg/rpc/adminservice/tests/node_execution_test.go b/flyteadmin/pkg/rpc/adminservice/tests/node_execution_test.go index 575140fef0..72cdc57ea5 100644 --- a/flyteadmin/pkg/rpc/adminservice/tests/node_execution_test.go +++ b/flyteadmin/pkg/rpc/adminservice/tests/node_execution_test.go @@ -32,10 +32,10 @@ func TestCreateNodeEvent(t *testing.T) { mockNodeExecutionManager.SetCreateNodeEventCallback( func(ctx context.Context, request *admin.NodeExecutionEventRequest) ( *admin.NodeExecutionEventResponse, error) { - assert.Equal(t, requestID, request.RequestId) - assert.NotNil(t, request.Event) - assert.True(t, proto.Equal(&nodeExecutionID, request.Event.Id)) - assert.Equal(t, phase, request.Event.Phase) + assert.Equal(t, requestID, request.GetRequestId()) + assert.NotNil(t, request.GetEvent()) + assert.True(t, proto.Equal(&nodeExecutionID, request.GetEvent().GetId())) + assert.Equal(t, phase, request.GetEvent().GetPhase()) return &admin.NodeExecutionEventResponse{}, nil }) mockServer := NewMockAdminServer(NewMockAdminServerInput{ @@ -82,7 +82,7 @@ func TestGetNodeExecution(t *testing.T) { mockNodeExecutionManager.SetGetNodeExecutionFunc( func(ctx context.Context, request *admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) { - assert.True(t, proto.Equal(&nodeExecutionID, request.Id)) + assert.True(t, proto.Equal(&nodeExecutionID, request.GetId())) return response, nil }, ) @@ -102,7 +102,7 @@ func 
TestGetNodeExecutionError(t *testing.T) { mockNodeExecutionManager.SetGetNodeExecutionFunc( func(ctx context.Context, request *admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) { - assert.True(t, proto.Equal(&nodeExecutionID, request.Id)) + assert.True(t, proto.Equal(&nodeExecutionID, request.GetId())) return nil, errors.New("expected error") }, ) @@ -123,9 +123,9 @@ func TestListNodeExecutions(t *testing.T) { filters := "encoded filters probably" mockNodeExecutionManager.SetListNodeExecutionsFunc(func(ctx context.Context, request *admin.NodeExecutionListRequest) ( *admin.NodeExecutionList, error) { - assert.Equal(t, filters, request.Filters) - assert.Equal(t, uint32(1), request.Limit) - assert.Equal(t, "20", request.Token) + assert.Equal(t, filters, request.GetFilters()) + assert.Equal(t, uint32(1), request.GetLimit()) + assert.Equal(t, "20", request.GetToken()) return &admin.NodeExecutionList{ NodeExecutions: []*admin.NodeExecution{ { @@ -145,7 +145,7 @@ func TestListNodeExecutions(t *testing.T) { Token: "20", }) assert.NoError(t, err) - assert.Len(t, response.NodeExecutions, 1) + assert.Len(t, response.GetNodeExecutions(), 1) } func TestListNodeExecutionsError(t *testing.T) { @@ -174,9 +174,9 @@ func TestListNodeExecutionsForTask(t *testing.T) { mockNodeExecutionManager.SetListNodeExecutionsForTaskFunc( func(ctx context.Context, request *admin.NodeExecutionForTaskListRequest) ( *admin.NodeExecutionList, error) { - assert.Equal(t, filters, request.Filters) - assert.Equal(t, uint32(1), request.Limit) - assert.Equal(t, "20", request.Token) + assert.Equal(t, filters, request.GetFilters()) + assert.Equal(t, uint32(1), request.GetLimit()) + assert.Equal(t, "20", request.GetToken()) return &admin.NodeExecutionList{ NodeExecutions: []*admin.NodeExecution{ { @@ -196,7 +196,7 @@ func TestListNodeExecutionsForTask(t *testing.T) { Token: "20", }) assert.NoError(t, err) - assert.Len(t, response.NodeExecutions, 1) + assert.Len(t, response.GetNodeExecutions(), 1) } func TestListNodeExecutionsForTaskError(t *testing.T) { @@ -225,7 +225,7 @@ func TestGetNodeExecutionData(t *testing.T) { mockNodeExecutionManager.SetGetNodeExecutionDataFunc( func(ctx context.Context, request *admin.NodeExecutionGetDataRequest) (*admin.NodeExecutionGetDataResponse, error) { - assert.True(t, proto.Equal(&nodeExecutionID, request.Id)) + assert.True(t, proto.Equal(&nodeExecutionID, request.GetId())) return &admin.NodeExecutionGetDataResponse{ Inputs: &admin.UrlBlob{ Url: "inputs", @@ -249,9 +249,9 @@ func TestGetNodeExecutionData(t *testing.T) { assert.True(t, proto.Equal(&admin.UrlBlob{ Url: "inputs", Bytes: 100, - }, resp.Inputs)) + }, resp.GetInputs())) assert.True(t, proto.Equal(&admin.UrlBlob{ Url: "outputs", Bytes: 200, - }, resp.Outputs)) + }, resp.GetOutputs())) } diff --git a/flyteadmin/pkg/rpc/adminservice/tests/task_execution_test.go b/flyteadmin/pkg/rpc/adminservice/tests/task_execution_test.go index b261401905..637426c455 100644 --- a/flyteadmin/pkg/rpc/adminservice/tests/task_execution_test.go +++ b/flyteadmin/pkg/rpc/adminservice/tests/task_execution_test.go @@ -47,11 +47,11 @@ func TestTaskExecution(t *testing.T) { mockTaskExecutionManager.SetCreateTaskEventCallback( func(ctx context.Context, request *admin.TaskExecutionEventRequest) ( *admin.TaskExecutionEventResponse, error) { - assert.Equal(t, requestID, request.RequestId) - assert.NotNil(t, request.Event) - assert.True(t, proto.Equal(taskID, request.Event.TaskId)) - assert.Equal(t, phase, request.Event.Phase) - assert.Equal(t, retryAttempt, 
request.Event.RetryAttempt) + assert.Equal(t, requestID, request.GetRequestId()) + assert.NotNil(t, request.GetEvent()) + assert.True(t, proto.Equal(taskID, request.GetEvent().GetTaskId())) + assert.Equal(t, phase, request.GetEvent().GetPhase()) + assert.Equal(t, retryAttempt, request.GetEvent().GetRetryAttempt()) return &admin.TaskExecutionEventResponse{}, nil }) mockServer := NewMockAdminServer(NewMockAdminServerInput{ @@ -143,9 +143,9 @@ func TestTaskExecution(t *testing.T) { mockTaskExecutionManager.SetGetTaskExecutionCallback( func(ctx context.Context, request *admin.TaskExecutionGetRequest) ( *admin.TaskExecution, error) { - assert.Equal(t, taskID, request.Id.TaskId) - assert.Equal(t, nodeExecutionID, request.Id.NodeExecutionId) - assert.Equal(t, retryAttempt, request.Id.RetryAttempt) + assert.Equal(t, taskID, request.GetId().GetTaskId()) + assert.Equal(t, nodeExecutionID, request.GetId().GetNodeExecutionId()) + assert.Equal(t, retryAttempt, request.GetId().GetRetryAttempt()) return &admin.TaskExecution{}, nil }) mockServer := NewMockAdminServer(NewMockAdminServerInput{ @@ -232,8 +232,8 @@ func TestTaskExecution(t *testing.T) { mockTaskExecutionManager.SetListTaskExecutionsCallback( func(ctx context.Context, request *admin.TaskExecutionListRequest) ( *admin.TaskExecutionList, error) { - assert.Equal(t, "1", request.Token) - assert.Equal(t, uint32(99), request.Limit) + assert.Equal(t, "1", request.GetToken()) + assert.Equal(t, uint32(99), request.GetLimit()) assert.True(t, proto.Equal(&core.NodeExecutionIdentifier{ NodeId: "nodey", ExecutionId: &core.WorkflowExecutionIdentifier{ @@ -241,7 +241,7 @@ func TestTaskExecution(t *testing.T) { Domain: "domain", Name: "name", }, - }, request.NodeExecutionId)) + }, request.GetNodeExecutionId())) return &admin.TaskExecutionList{}, nil }) mockServer := NewMockAdminServer(NewMockAdminServerInput{ @@ -344,9 +344,9 @@ func TestGetTaskExecutionData(t *testing.T) { assert.True(t, proto.Equal(&admin.UrlBlob{ Url: "inputs", Bytes: 100, - }, resp.Inputs)) + }, resp.GetInputs())) assert.True(t, proto.Equal(&admin.UrlBlob{ Url: "outputs", Bytes: 200, - }, resp.Outputs)) + }, resp.GetOutputs())) } diff --git a/flyteadmin/pkg/rpc/adminservice/tests/task_test.go b/flyteadmin/pkg/rpc/adminservice/tests/task_test.go index 2e4d5a8287..bd17b1baa6 100644 --- a/flyteadmin/pkg/rpc/adminservice/tests/task_test.go +++ b/flyteadmin/pkg/rpc/adminservice/tests/task_test.go @@ -49,7 +49,7 @@ func TestTaskError(t *testing.T) { mockTaskManager.SetCreateCallback( func(ctx context.Context, request *admin.TaskCreateRequest) (*admin.TaskCreateResponse, error) { - return nil, errors.GetMissingEntityError(core.ResourceType_TASK.String(), request.Id) + return nil, errors.GetMissingEntityError(core.ResourceType_TASK.String(), request.GetId()) }, ) mockServer := NewMockAdminServer(NewMockAdminServerInput{ @@ -77,7 +77,7 @@ func TestListUniqueTaskIds(t *testing.T) { mockTaskManager.SetListUniqueIdsFunc(func(ctx context.Context, request *admin.NamedEntityIdentifierListRequest) ( *admin.NamedEntityIdentifierList, error) { - assert.Equal(t, "staging", request.Domain) + assert.Equal(t, "staging", request.GetDomain()) return nil, nil }) mockServer := NewMockAdminServer(NewMockAdminServerInput{ diff --git a/flyteadmin/pkg/rpc/adminservice/tests/workflow_test.go b/flyteadmin/pkg/rpc/adminservice/tests/workflow_test.go index 915c127ac2..5799b32519 100644 --- a/flyteadmin/pkg/rpc/adminservice/tests/workflow_test.go +++ b/flyteadmin/pkg/rpc/adminservice/tests/workflow_test.go @@ -49,7 +49,7 
@@ func TestCreateWorkflowError(t *testing.T) { mockWorkflowManager.SetCreateCallback( func(ctx context.Context, request *admin.WorkflowCreateRequest) (*admin.WorkflowCreateResponse, error) { - return nil, errors.GetMissingEntityError(core.ResourceType_WORKFLOW.String(), request.Id) + return nil, errors.GetMissingEntityError(core.ResourceType_WORKFLOW.String(), request.GetId()) }, ) mockServer := NewMockAdminServer(NewMockAdminServerInput{ diff --git a/flyteadmin/pkg/rpc/adminservice/util/metrics.go b/flyteadmin/pkg/rpc/adminservice/util/metrics.go index bcab066a41..1274541a9b 100644 --- a/flyteadmin/pkg/rpc/adminservice/util/metrics.go +++ b/flyteadmin/pkg/rpc/adminservice/util/metrics.go @@ -54,7 +54,7 @@ func (m *RequestMetrics) Success() { func newResponseCodeMetrics(scope promutils.Scope) responseCodeMetrics { responseCodeCounters := make(map[codes.Code]prometheus.Counter) for i := 0; i < maxGRPCStatusCode; i++ { - code := codes.Code(i) + code := codes.Code(i) // #nosec G115 responseCodeCounters[code] = scope.MustNewCounter(code.String(), fmt.Sprintf("count of responses returning: %s", code.String())) } diff --git a/flyteadmin/pkg/rpc/adminservice/workflow.go b/flyteadmin/pkg/rpc/adminservice/workflow.go index ee9a6b4eff..0e0b425f7c 100644 --- a/flyteadmin/pkg/rpc/adminservice/workflow.go +++ b/flyteadmin/pkg/rpc/adminservice/workflow.go @@ -27,7 +27,7 @@ func (m *AdminService) CreateWorkflow( func (m *AdminService) GetWorkflow(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Workflow, error) { // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it // to the request. - if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED { + if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED { logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) request.Id.ResourceType = core.ResourceType_WORKFLOW } diff --git a/flyteadmin/pkg/runtime/interfaces/application_configuration.go b/flyteadmin/pkg/runtime/interfaces/application_configuration.go index 55791a1538..0f729b5cde 100644 --- a/flyteadmin/pkg/runtime/interfaces/application_configuration.go +++ b/flyteadmin/pkg/runtime/interfaces/application_configuration.go @@ -212,16 +212,16 @@ func (a *ApplicationConfig) GetAsWorkflowExecutionConfig() *admin.WorkflowExecut } // For the others, we only add the field when the field is set in the config. 
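The // #nosec G115 annotations introduced here (on codes.Code(i) above, and on the uint32 cast of GetMaxParallelism further below) suppress gosec's check for potentially lossy integer conversions, which arrives with the switch from the retired gas linter to gosec in the .golangci.yml changes later in this patch. A small sketch of the two ways to satisfy that rule, using a hypothetical bound in place of maxGRPCStatusCode:

```go
package main

import (
	"fmt"
	"math"
)

// safeUint32 converts an int to uint32 with an explicit range check, the
// alternative to suppressing the G115 finding.
func safeUint32(i int) (uint32, error) {
	if i < 0 || int64(i) > math.MaxUint32 {
		return 0, fmt.Errorf("value %d out of uint32 range", i)
	}
	return uint32(i), nil // conversion is provably in range here
}

func main() {
	const maxCode = 17 // hypothetical bound, standing in for maxGRPCStatusCode

	for i := 0; i < maxCode; i++ {
		// The loop bound guarantees the conversion cannot overflow, so the
		// patch keeps the plain cast and silences the finding instead:
		code := uint32(i) // #nosec G115
		_ = code
	}

	v, err := safeUint32(-1)
	fmt.Println(v, err)
}
```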
- if a.GetSecurityContext().RunAs.GetK8SServiceAccount() != "" || a.GetSecurityContext().RunAs.GetIamRole() != "" { + if a.GetSecurityContext().GetRunAs().GetK8SServiceAccount() != "" || a.GetSecurityContext().GetRunAs().GetIamRole() != "" { wec.SecurityContext = a.GetSecurityContext() } - if a.GetRawOutputDataConfig().OutputLocationPrefix != "" { + if a.GetRawOutputDataConfig().GetOutputLocationPrefix() != "" { wec.RawOutputDataConfig = a.GetRawOutputDataConfig() } - if len(a.GetLabels().Values) > 0 { + if len(a.GetLabels().GetValues()) > 0 { wec.Labels = a.GetLabels() } - if len(a.GetAnnotations().Values) > 0 { + if len(a.GetAnnotations().GetValues()) > 0 { wec.Annotations = a.GetAnnotations() } diff --git a/flyteadmin/pkg/workflowengine/impl/interface_provider.go b/flyteadmin/pkg/workflowengine/impl/interface_provider.go index 566613f635..6bae0c9a05 100644 --- a/flyteadmin/pkg/workflowengine/impl/interface_provider.go +++ b/flyteadmin/pkg/workflowengine/impl/interface_provider.go @@ -42,8 +42,8 @@ func NewLaunchPlanInterfaceProvider(launchPlan models.LaunchPlan, identifier *co return &LaunchPlanInterfaceProvider{}, err } return &LaunchPlanInterfaceProvider{ - expectedInputs: closure.ExpectedInputs, - expectedOutputs: closure.ExpectedOutputs, + expectedInputs: closure.GetExpectedInputs(), + expectedOutputs: closure.GetExpectedOutputs(), identifier: identifier, }, nil } diff --git a/flyteadmin/pkg/workflowengine/impl/interface_provider_test.go b/flyteadmin/pkg/workflowengine/impl/interface_provider_test.go index 4d96050f7d..5924dab20c 100644 --- a/flyteadmin/pkg/workflowengine/impl/interface_provider_test.go +++ b/flyteadmin/pkg/workflowengine/impl/interface_provider_test.go @@ -64,14 +64,14 @@ func TestGetId(t *testing.T) { func TestGetExpectedInputs(t *testing.T) { provider := getProviderForTest(t) - assert.Contains(t, (*provider.GetExpectedInputs()).Parameters, "foo") - assert.NotNil(t, (*provider.GetExpectedInputs()).Parameters["foo"].Var.Type.GetSimple()) - assert.EqualValues(t, "STRING", (*provider.GetExpectedInputs()).Parameters["foo"].Var.Type.GetSimple().String()) - assert.NotNil(t, (*provider.GetExpectedInputs()).Parameters["foo"].GetDefault()) + assert.Contains(t, (*provider.GetExpectedInputs()).GetParameters(), "foo") + assert.NotNil(t, (*provider.GetExpectedInputs()).GetParameters()["foo"].GetVar().GetType().GetSimple()) + assert.EqualValues(t, "STRING", (*provider.GetExpectedInputs()).GetParameters()["foo"].GetVar().GetType().GetSimple().String()) + assert.NotNil(t, (*provider.GetExpectedInputs()).GetParameters()["foo"].GetDefault()) } func TestGetExpectedOutputs(t *testing.T) { provider := getProviderForTest(t) - assert.EqualValues(t, outputs.Variables["foo"].GetType().GetType(), - provider.GetExpectedOutputs().Variables["foo"].GetType().GetType()) + assert.EqualValues(t, outputs.GetVariables()["foo"].GetType().GetType(), + provider.GetExpectedOutputs().GetVariables()["foo"].GetType().GetType()) } diff --git a/flyteadmin/pkg/workflowengine/impl/k8s_executor.go b/flyteadmin/pkg/workflowengine/impl/k8s_executor.go index d941cc8309..03de2bbba9 100644 --- a/flyteadmin/pkg/workflowengine/impl/k8s_executor.go +++ b/flyteadmin/pkg/workflowengine/impl/k8s_executor.go @@ -37,7 +37,7 @@ func (e K8sWorkflowExecutor) Execute(ctx context.Context, data interfaces.Execut flyteWf, err := e.workflowBuilder.Build(data.WorkflowClosure, data.ExecutionParameters.Inputs, data.ExecutionID, data.Namespace) if err != nil { logger.Infof(ctx, "failed to build the workflow [%+v] %v", - 
data.WorkflowClosure.Primary.Template.Id, err) + data.WorkflowClosure.GetPrimary().GetTemplate().GetId(), err) return interfaces.ExecutionResponse{}, err } err = PrepareFlyteWorkflow(data, flyteWf) @@ -64,11 +64,11 @@ func (e K8sWorkflowExecutor) Execute(ctx context.Context, data interfaces.Execut } executionTargetSpec := executioncluster.ExecutionTargetSpec{ - Project: data.ExecutionID.Project, - Domain: data.ExecutionID.Domain, + Project: data.ExecutionID.GetProject(), + Domain: data.ExecutionID.GetDomain(), Workflow: data.ReferenceWorkflowName, LaunchPlan: data.ReferenceWorkflowName, - ExecutionID: data.ExecutionID.Name, + ExecutionID: data.ExecutionID.GetName(), ExecutionClusterLabel: data.ExecutionParameters.ExecutionClusterLabel, } targetCluster, err := e.executionCluster.GetTarget(ctx, &executionTargetSpec) @@ -92,7 +92,7 @@ func (e K8sWorkflowExecutor) Abort(ctx context.Context, data interfaces.AbortDat TargetID: data.Cluster, }) if err != nil { - return errors.NewFlyteAdminErrorf(codes.Internal, err.Error()) + return errors.NewFlyteAdminErrorf(codes.Internal, err.Error()) //nolint } err = target.FlyteClient.FlyteworkflowV1alpha1().FlyteWorkflows(data.Namespace).Delete(ctx, data.ExecutionID.GetName(), v1.DeleteOptions{ PropagationPolicy: &deletePropagationBackground, diff --git a/flyteadmin/pkg/workflowengine/impl/k8s_executor_test.go b/flyteadmin/pkg/workflowengine/impl/k8s_executor_test.go index a2ecb51364..5b9db6dfe9 100644 --- a/flyteadmin/pkg/workflowengine/impl/k8s_executor_test.go +++ b/flyteadmin/pkg/workflowengine/impl/k8s_executor_test.go @@ -281,7 +281,7 @@ func TestExecute_MiscError(t *testing.T) { func TestAbort(t *testing.T) { fakeFlyteWorkflow := FakeFlyteWorkflow{} fakeFlyteWorkflow.deleteCallback = func(name string, options *v1.DeleteOptions) error { - assert.Equal(t, execID.Name, name) + assert.Equal(t, execID.GetName(), name) assert.Equal(t, options.PropagationPolicy, &deletePropagationBackground) return nil } @@ -306,7 +306,7 @@ func TestAbort_Notfound(t *testing.T) { return k8_api_err.NewNotFound(schema.GroupResource{ Group: "foo", Resource: "bar", - }, execID.Name) + }, execID.GetName()) } fakeFlyteWF.flyteWorkflowsCallback = func(ns string) v1alpha12.FlyteWorkflowInterface { assert.Equal(t, namespace, ns) diff --git a/flyteadmin/pkg/workflowengine/impl/prepare_execution.go b/flyteadmin/pkg/workflowengine/impl/prepare_execution.go index 169cb15616..70afadbd7b 100644 --- a/flyteadmin/pkg/workflowengine/impl/prepare_execution.go +++ b/flyteadmin/pkg/workflowengine/impl/prepare_execution.go @@ -26,20 +26,20 @@ func addMapValues(overrides map[string]string, defaultValues map[string]string) } func addPermissions(securityCtx *core.SecurityContext, roleNameKey string, flyteWf *v1alpha1.FlyteWorkflow) { - if securityCtx == nil || securityCtx.RunAs == nil { + if securityCtx == nil || securityCtx.GetRunAs() == nil { return } securityCtxCopy, _ := proto.Clone(securityCtx).(*core.SecurityContext) flyteWf.SecurityContext = *securityCtxCopy - if len(securityCtx.RunAs.IamRole) > 0 { + if len(securityCtx.GetRunAs().GetIamRole()) > 0 { if flyteWf.Annotations == nil { flyteWf.Annotations = map[string]string{} } - flyteWf.Annotations[roleNameKey] = securityCtx.RunAs.IamRole + flyteWf.Annotations[roleNameKey] = securityCtx.GetRunAs().GetIamRole() } - if len(securityCtx.RunAs.K8SServiceAccount) > 0 { - flyteWf.ServiceAccountName = securityCtx.RunAs.K8SServiceAccount + if len(securityCtx.GetRunAs().GetK8SServiceAccount()) > 0 { + flyteWf.ServiceAccountName = 
securityCtx.GetRunAs().GetK8SServiceAccount() } } @@ -53,14 +53,14 @@ func addExecutionOverrides(taskPluginOverrides []*admin.PluginOverride, }, } for _, override := range taskPluginOverrides { - executionConfig.TaskPluginImpls[override.TaskType] = v1alpha1.TaskPluginOverride{ - PluginIDs: override.PluginId, - MissingPluginBehavior: override.MissingPluginBehavior, + executionConfig.TaskPluginImpls[override.GetTaskType()] = v1alpha1.TaskPluginOverride{ + PluginIDs: override.GetPluginId(), + MissingPluginBehavior: override.GetMissingPluginBehavior(), } } if workflowExecutionConfig != nil { - executionConfig.MaxParallelism = uint32(workflowExecutionConfig.MaxParallelism) + executionConfig.MaxParallelism = uint32(workflowExecutionConfig.GetMaxParallelism()) // #nosec G115 if workflowExecutionConfig.GetInterruptible() != nil { interruptible := workflowExecutionConfig.GetInterruptible().GetValue() @@ -71,8 +71,8 @@ func addExecutionOverrides(taskPluginOverrides []*admin.PluginOverride, envs := make(map[string]string) if workflowExecutionConfig.GetEnvs() != nil { - for _, v := range workflowExecutionConfig.GetEnvs().Values { - envs[v.Key] = v.Value + for _, v := range workflowExecutionConfig.GetEnvs().GetValues() { + envs[v.GetKey()] = v.GetValue() } executionConfig.EnvironmentVariables = envs } @@ -134,7 +134,7 @@ func PrepareFlyteWorkflow(data interfaces.ExecutionData, flyteWorkflow *v1alpha1 // add permissions from auth and security context. Adding permissions from auth would be removed once all clients // have migrated over to security context - addPermissions(data.ExecutionParameters.ExecutionConfig.SecurityContext, + addPermissions(data.ExecutionParameters.ExecutionConfig.GetSecurityContext(), data.ExecutionParameters.RoleNameKey, flyteWorkflow) labels := addMapValues(data.ExecutionParameters.Labels, flyteWorkflow.Labels) diff --git a/flyteadmin/scheduler/dbapi/event_scheduler_impl.go b/flyteadmin/scheduler/dbapi/event_scheduler_impl.go index 972a04480f..bffcace058 100644 --- a/flyteadmin/scheduler/dbapi/event_scheduler_impl.go +++ b/flyteadmin/scheduler/dbapi/event_scheduler_impl.go @@ -36,10 +36,10 @@ func (s *eventScheduler) AddSchedule(ctx context.Context, input interfaces.AddSc var fixedRateUnit admin.FixedRateUnit switch v := input.ScheduleExpression.GetScheduleExpression().(type) { case *admin.Schedule_Rate: - fixedRateValue = v.Rate.Value - fixedRateUnit = v.Rate.Unit + fixedRateValue = v.Rate.GetValue() + fixedRateUnit = v.Rate.GetUnit() case *admin.Schedule_CronSchedule: - cronString = v.CronSchedule.Schedule + cronString = v.CronSchedule.GetSchedule() default: return fmt.Errorf("failed adding schedule for unknown schedule expression type %v", v) } @@ -48,13 +48,13 @@ func (s *eventScheduler) AddSchedule(ctx context.Context, input interfaces.AddSc CronExpression: cronString, FixedRateValue: fixedRateValue, Unit: fixedRateUnit, - KickoffTimeInputArg: input.ScheduleExpression.KickoffTimeInputArg, + KickoffTimeInputArg: input.ScheduleExpression.GetKickoffTimeInputArg(), Active: &active, SchedulableEntityKey: models.SchedulableEntityKey{ - Project: input.Identifier.Project, - Domain: input.Identifier.Domain, - Name: input.Identifier.Name, - Version: input.Identifier.Version, + Project: input.Identifier.GetProject(), + Domain: input.Identifier.GetDomain(), + Name: input.Identifier.GetName(), + Version: input.Identifier.GetVersion(), }, } err := s.db.SchedulableEntityRepo().Activate(ctx, modelInput) @@ -69,10 +69,10 @@ func (s *eventScheduler) RemoveSchedule(ctx context.Context, input 
interfaces.Re logger.Infof(ctx, "Received call to remove schedule [%+v]. Will deactivate it in the scheduler", input.Identifier) err := s.db.SchedulableEntityRepo().Deactivate(ctx, models.SchedulableEntityKey{ - Project: input.Identifier.Project, - Domain: input.Identifier.Domain, - Name: input.Identifier.Name, - Version: input.Identifier.Version, + Project: input.Identifier.GetProject(), + Domain: input.Identifier.GetDomain(), + Name: input.Identifier.GetName(), + Version: input.Identifier.GetVersion(), }) if err != nil { diff --git a/flyteadmin/scheduler/identifier/identifier.go b/flyteadmin/scheduler/identifier/identifier.go index 5d386e8652..8db71863b7 100644 --- a/flyteadmin/scheduler/identifier/identifier.go +++ b/flyteadmin/scheduler/identifier/identifier.go @@ -44,7 +44,7 @@ func GetExecutionIdentifier(ctx context.Context, identifier *core.Identifier, sc func hashIdentifier(ctx context.Context, identifier *core.Identifier) uint64 { h := fnv.New64() _, err := h.Write([]byte(fmt.Sprintf(scheduleNameInputsFormat, - identifier.Project, identifier.Domain, identifier.Name, identifier.Version))) + identifier.GetProject(), identifier.GetDomain(), identifier.GetName(), identifier.GetVersion()))) if err != nil { // This shouldn't occur. logger.Errorf(ctx, @@ -59,7 +59,7 @@ func hashIdentifier(ctx context.Context, identifier *core.Identifier) uint64 { func hashScheduledTimeStamp(ctx context.Context, identifier *core.Identifier, scheduledTime time.Time) uint64 { h := fnv.New64() _, err := h.Write([]byte(fmt.Sprintf(executionIDInputsFormat, - identifier.Project, identifier.Domain, identifier.Name, identifier.Version, scheduledTime.Unix()))) + identifier.GetProject(), identifier.GetDomain(), identifier.GetName(), identifier.GetVersion(), scheduledTime.Unix()))) if err != nil { // This shouldn't occur. logger.Errorf(ctx, diff --git a/flytecopilot/.golangci.yml b/flytecopilot/.golangci.yml index 7f4dbc80e8..71a85ec5c3 100644 --- a/flytecopilot/.golangci.yml +++ b/flytecopilot/.golangci.yml @@ -1,35 +1,25 @@ -# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. 
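hashIdentifier and hashScheduledTimeStamp above derive a deterministic 64-bit FNV hash from the schedule's identifier fields (plus the fire time for execution ids), presumably so the same schedule and tick always resolve to the same name. A self-contained sketch of that approach, with an assumed ':'-separated format string standing in for the scheduleNameInputsFormat and executionIDInputsFormat constants that are not shown in this hunk:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"time"
)

// hashFields mirrors the hashing approach in scheduler/identifier: write a
// formatted string of the identifier fields into an FNV-1 64-bit hash.
func hashFields(project, domain, name, version string) uint64 {
	h := fnv.New64()
	// fnv's Write never fails; the real code still checks and logs the error.
	_, _ = h.Write([]byte(fmt.Sprintf("%s:%s:%s:%s", project, domain, name, version)))
	return h.Sum64()
}

// hashWithTimestamp additionally folds in the scheduled time, as
// hashScheduledTimeStamp does for execution identifiers.
func hashWithTimestamp(project, domain, name, version string, t time.Time) uint64 {
	h := fnv.New64()
	_, _ = h.Write([]byte(fmt.Sprintf("%s:%s:%s:%s:%d", project, domain, name, version, t.Unix())))
	return h.Sum64()
}

func main() {
	// Identical inputs always produce identical hashes, giving stable names.
	fmt.Println(hashFields("flytesnacks", "development", "my_lp", "v1"))
	fmt.Println(hashWithTimestamp("flytesnacks", "development", "my_lp", "v1", time.Unix(1700000000, 0)))
}
```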
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: -# -# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst - run: skip-dirs: - pkg/client - linters: disable-all: true enable: - - deadcode - errcheck - - gas + - gosec - gci - goconst - goimports - - golint - gosimple - govet - ineffassign - misspell - nakedret - staticcheck - - structcheck - typecheck - unconvert - unparam - unused - - varcheck - + - protogetter linters-settings: gci: custom-order: true @@ -38,3 +28,5 @@ linters-settings: - default - prefix(github.com/flyteorg) skip-generated: true + goconst: + ignore-tests: true diff --git a/flytecopilot/cmd/download_test.go b/flytecopilot/cmd/download_test.go index 16cda7c67d..b96ffd46a6 100644 --- a/flytecopilot/cmd/download_test.go +++ b/flytecopilot/cmd/download_test.go @@ -182,8 +182,8 @@ func TestDownloadOptions_Download(t *testing.T) { errProto := &core.ErrorDocument{} err = store.ReadProtobuf(ctx, errFile, errProto) assert.NoError(t, err) - if assert.NotNil(t, errProto.Error) { - assert.Equal(t, core.ContainerError_RECOVERABLE, errProto.Error.Kind) + if assert.NotNil(t, errProto.GetError()) { + assert.Equal(t, core.ContainerError_RECOVERABLE, errProto.GetError().GetKind()) } }) } diff --git a/flytecopilot/cmd/sidecar.go b/flytecopilot/cmd/sidecar.go index 09abdb31e5..179d6362f8 100644 --- a/flytecopilot/cmd/sidecar.go +++ b/flytecopilot/cmd/sidecar.go @@ -70,9 +70,9 @@ func (u *UploadOptions) uploader(ctx context.Context) error { logger.Errorf(ctx, "Bad interface passed, failed to unmarshal err: %s", err) return errors.Wrap(err, "Bad interface passed, failed to unmarshal, expected core.TypedInterface") } - outputInterface := iface.Outputs + outputInterface := iface.GetOutputs() - if iface.Outputs == nil || iface.Outputs.Variables == nil || len(iface.Outputs.Variables) == 0 { + if iface.GetOutputs() == nil || iface.Outputs.Variables == nil || len(iface.GetOutputs().GetVariables()) == 0 { logger.Infof(ctx, "Empty output interface received. Assuming void outputs. 
Sidecar will exit immediately.") return nil } diff --git a/flytecopilot/cmd/sidecar_test.go b/flytecopilot/cmd/sidecar_test.go index a7cc1c964a..2932e6fa9c 100644 --- a/flytecopilot/cmd/sidecar_test.go +++ b/flytecopilot/cmd/sidecar_test.go @@ -87,7 +87,7 @@ func TestUploadOptions_Upload(t *testing.T) { } success := path.Join(tmpDir, SuccessFile) - assert.NoError(t, ioutil.WriteFile(success, []byte("done"), os.ModePerm)) + assert.NoError(t, os.WriteFile(success, []byte("done"), os.ModePerm)) // #nosec G306 ok, err := containerwatcher.FileExists(success) assert.NoError(t, err) assert.True(t, ok, "successfile not created") diff --git a/flytecopilot/data/download.go b/flytecopilot/data/download.go index e4efa22222..24450697e7 100644 --- a/flytecopilot/data/download.go +++ b/flytecopilot/data/download.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -52,13 +51,13 @@ func (d Downloader) handleBlob(ctx context.Context, blob *core.Blob, toPath stri (download each part) (error on write or directory) (close streams safely, track success) (completion or report missing closures) */ - blobRef := storage.DataReference(blob.Uri) + blobRef := storage.DataReference(blob.GetUri()) scheme, _, _, err := blobRef.Split() if err != nil { return nil, errors.Wrapf(err, "Blob uri incorrectly formatted") } - if blob.GetMetadata().GetType().Dimensionality == core.BlobType_MULTIPART { + if blob.GetMetadata().GetType().GetDimensionality() == core.BlobType_MULTIPART { // Collect all parts of the multipart blob recursively (List API handles nested directories) // Set maxItems to 100 as a parameter for the List API, enabling batch retrieval of items until all are downloaded maxItems := 100 @@ -173,7 +172,7 @@ func (d Downloader) handleBlob(ctx context.Context, blob *core.Blob, toPath stri } logger.Infof(ctx, "successfully copied %d remote files from [%s] to local [%s]", downloadSuccess, blobRef, toPath) return toPath, nil - } else if blob.GetMetadata().GetType().Dimensionality == core.BlobType_SINGLE { + } else if blob.GetMetadata().GetType().GetDimensionality() == core.BlobType_SINGLE { // reader should be declared here (avoid being shared across all goroutines) var reader io.ReadCloser if scheme == "http" || scheme == "https" { @@ -214,14 +213,14 @@ func (d Downloader) handleBlob(ctx context.Context, blob *core.Blob, toPath stri } func (d Downloader) handleSchema(ctx context.Context, schema *core.Schema, toFilePath string) (interface{}, error) { - return d.handleBlob(ctx, &core.Blob{Uri: schema.Uri, Metadata: &core.BlobMetadata{Type: &core.BlobType{Dimensionality: core.BlobType_MULTIPART}}}, toFilePath) + return d.handleBlob(ctx, &core.Blob{Uri: schema.GetUri(), Metadata: &core.BlobMetadata{Type: &core.BlobType{Dimensionality: core.BlobType_MULTIPART}}}, toFilePath) } func (d Downloader) handleBinary(_ context.Context, b *core.Binary, toFilePath string, writeToFile bool) (interface{}, error) { // maybe we should return a map v := b.GetValue() if writeToFile { - return v, ioutil.WriteFile(toFilePath, v, os.ModePerm) + return v, os.WriteFile(toFilePath, v, os.ModePerm) // #nosec G306 } return v, nil } @@ -229,9 +228,9 @@ func (d Downloader) handleBinary(_ context.Context, b *core.Binary, toFilePath s func (d Downloader) handleError(_ context.Context, b *core.Error, toFilePath string, writeToFile bool) (interface{}, error) { // maybe we should return a map if writeToFile { - return b.Message, ioutil.WriteFile(toFilePath, []byte(b.Message), os.ModePerm) + return 
b.GetMessage(), os.WriteFile(toFilePath, []byte(b.GetMessage()), os.ModePerm) // #nosec G306 } - return b.Message, nil + return b.GetMessage(), nil } func (d Downloader) handleGeneric(ctx context.Context, b *structpb.Struct, toFilePath string, writeToFile bool) (interface{}, error) { @@ -259,7 +258,7 @@ func (d Downloader) handlePrimitive(primitive *core.Primitive, toFilePath string var v interface{} var err error - switch primitive.Value.(type) { + switch primitive.GetValue().(type) { case *core.Primitive_StringValue: v = primitive.GetStringValue() toByteArray = func() ([]byte, error) { @@ -307,7 +306,7 @@ func (d Downloader) handlePrimitive(primitive *core.Primitive, toFilePath string if err != nil { return nil, err } - return v, ioutil.WriteFile(toFilePath, b, os.ModePerm) + return v, os.WriteFile(toFilePath, b, os.ModePerm) // #nosec G306 } return v, nil } @@ -321,11 +320,11 @@ func (d Downloader) handleScalar(ctx context.Context, scalar *core.Scalar, toFil case *core.Scalar_Blob: b := scalar.GetBlob() i, err := d.handleBlob(ctx, b, toFilePath) - return i, &core.Scalar{Value: &core.Scalar_Blob{Blob: &core.Blob{Metadata: b.Metadata, Uri: toFilePath}}}, err + return i, &core.Scalar{Value: &core.Scalar_Blob{Blob: &core.Blob{Metadata: b.GetMetadata(), Uri: toFilePath}}}, err case *core.Scalar_Schema: b := scalar.GetSchema() i, err := d.handleSchema(ctx, b, toFilePath) - return i, &core.Scalar{Value: &core.Scalar_Schema{Schema: &core.Schema{Type: b.Type, Uri: toFilePath}}}, err + return i, &core.Scalar{Value: &core.Scalar_Schema{Schema: &core.Schema{Type: b.GetType(), Uri: toFilePath}}}, err case *core.Scalar_Binary: b := scalar.GetBinary() i, err := d.handleBinary(ctx, b, toFilePath, writeToFile) @@ -340,7 +339,7 @@ func (d Downloader) handleScalar(ctx context.Context, scalar *core.Scalar, toFil return i, scalar, err case *core.Scalar_NoneType: if writeToFile { - return nil, scalar, ioutil.WriteFile(toFilePath, []byte("null"), os.ModePerm) + return nil, scalar, os.WriteFile(toFilePath, []byte("null"), os.ModePerm) // #nosec G306 } return nil, scalar, nil default: @@ -359,6 +358,10 @@ func (d Downloader) handleLiteral(ctx context.Context, lit *core.Literal, filePa Scalar: s, }}, nil case *core.Literal_Collection: + err := os.MkdirAll(filePath, os.ModePerm) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to create directory [%s]", filePath) + } v, c2, err := d.handleCollection(ctx, lit.GetCollection(), filePath, writeToFile) if err != nil { return nil, nil, err @@ -367,6 +370,10 @@ func (d Downloader) handleLiteral(ctx context.Context, lit *core.Literal, filePa Collection: c2, }}, nil case *core.Literal_Map: + err := os.MkdirAll(filePath, os.ModePerm) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to create directory [%s]", filePath) + } v, m, err := d.RecursiveDownload(ctx, lit.GetMap(), filePath, writeToFile) if err != nil { return nil, nil, err @@ -381,12 +388,12 @@ func (d Downloader) handleLiteral(ctx context.Context, lit *core.Literal, filePa // Collection should be stored as a top level list file and may have accompanying files? 
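handleLiteral now creates the target directory before descending into a collection or map literal, and handleCollection below writes each element to a file named by its index, which the new TestRecursiveDownload cases assert on. A standalone illustration of the resulting on-disk layout, not the real Downloader (which also handles maps, blobs and offloaded literals):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
)

// writeCollection sketches the layout the downloader produces for a
// collection literal: one directory per variable, one file per element,
// named by its index ("0", "1", ...).
func writeCollection(dir string, values []string) error {
	// This is the step the patch adds: ensure the directory exists before
	// recursing into the collection.
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return fmt.Errorf("failed to create directory [%s]: %w", dir, err)
	}
	for i, v := range values {
		p := filepath.Join(dir, strconv.Itoa(i))
		if err := os.WriteFile(p, []byte(v), 0o644); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := writeCollection("./inputs/input1", []string{"string1", "string2"}); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("wrote ./inputs/input1/0 and ./inputs/input1/1")
}
```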
func (d Downloader) handleCollection(ctx context.Context, c *core.LiteralCollection, dir string, writePrimitiveToFile bool) ([]interface{}, *core.LiteralCollection, error) { - if c == nil || len(c.Literals) == 0 { + if c == nil || len(c.GetLiterals()) == 0 { return []interface{}{}, c, nil } var collection []interface{} litCollection := &core.LiteralCollection{} - for i, lit := range c.Literals { + for i, lit := range c.GetLiterals() { filePath := path.Join(dir, strconv.Itoa(i)) v, lit, err := d.handleLiteral(ctx, lit, filePath, writePrimitiveToFile) if err != nil { @@ -406,11 +413,21 @@ type downloadedResult struct { func (d Downloader) RecursiveDownload(ctx context.Context, inputs *core.LiteralMap, dir string, writePrimitiveToFile bool) (VarMap, *core.LiteralMap, error) { childCtx, cancel := context.WithCancel(ctx) defer cancel() - if inputs == nil || len(inputs.Literals) == 0 { + if inputs == nil || len(inputs.GetLiterals()) == 0 { return VarMap{}, nil, nil } - f := make(FutureMap, len(inputs.Literals)) - for variable, literal := range inputs.Literals { + f := make(FutureMap, len(inputs.GetLiterals())) + for variable, literal := range inputs.GetLiterals() { + if literal.GetOffloadedMetadata() != nil { + offloadedMetadataURI := literal.GetOffloadedMetadata().GetUri() + // literal will be overwritten with the contents of the offloaded data which contains the actual large literal. + if err := d.store.ReadProtobuf(ctx, storage.DataReference(offloadedMetadataURI), literal); err != nil { + errString := fmt.Sprintf("Failed to read the object at location [%s] with error [%s]", offloadedMetadataURI, err) + logger.Error(ctx, errString) + return nil, nil, fmt.Errorf("%s", errString) + } + logger.Infof(ctx, "read object at location [%s]", offloadedMetadataURI) + } varPath := path.Join(dir, variable) lit := literal f[variable] = futures.NewAsyncFuture(childCtx, func(ctx2 context.Context) (interface{}, error) { @@ -468,7 +485,8 @@ func (d Downloader) DownloadInputs(ctx context.Context, inputRef storage.DataRef if err != nil { return err } - if err := ioutil.WriteFile(path.Join(outputDir, "inputs.pb"), b, os.ModePerm); err != nil { + // #nosec G306 + if err := os.WriteFile(path.Join(outputDir, "inputs.pb"), b, os.ModePerm); err != nil { return err } @@ -477,14 +495,14 @@ func (d Downloader) DownloadInputs(ctx context.Context, inputRef storage.DataRef if err != nil { return errors.Wrapf(err, "failed to marshal out inputs") } - return ioutil.WriteFile(path.Join(outputDir, "inputs.json"), m, os.ModePerm) + return os.WriteFile(path.Join(outputDir, "inputs.json"), m, os.ModePerm) // #nosec G306 } if d.format == core.DataLoadingConfig_YAML { m, err := yaml.Marshal(varMap) if err != nil { return errors.Wrapf(err, "failed to marshal out inputs") } - return ioutil.WriteFile(path.Join(outputDir, "inputs.yaml"), m, os.ModePerm) + return os.WriteFile(path.Join(outputDir, "inputs.yaml"), m, os.ModePerm) // #nosec G306 } return nil } diff --git a/flytecopilot/data/download_test.go b/flytecopilot/data/download_test.go index 1f3b3a7be6..dbc7cb33e7 100644 --- a/flytecopilot/data/download_test.go +++ b/flytecopilot/data/download_test.go @@ -7,11 +7,11 @@ import ( "path/filepath" "testing" + "github.com/stretchr/testify/assert" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flytestdlib/promutils" "github.com/flyteorg/flyte/flytestdlib/storage" - - "github.com/stretchr/testify/assert" ) func TestHandleBlobMultipart(t *testing.T) { @@ -19,9 +19,11 @@ func TestHandleBlobMultipart(t 
*testing.T) { s, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) assert.NoError(t, err) ref := storage.DataReference("s3://container/folder/file1") - s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + err = s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + assert.NoError(t, err) ref = storage.DataReference("s3://container/folder/file2") - s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + err = s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + assert.NoError(t, err) d := Downloader{store: s} @@ -87,7 +89,8 @@ func TestHandleBlobSinglePart(t *testing.T) { s, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) assert.NoError(t, err) ref := storage.DataReference("s3://container/file") - s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + err = s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + assert.NoError(t, err) d := Downloader{store: s} @@ -149,3 +152,162 @@ func TestHandleBlobHTTP(t *testing.T) { t.Errorf("expected file %s to exist", toPath) } } + +func TestRecursiveDownload(t *testing.T) { + t.Run("OffloadedMetadataContainsCollectionOfStrings", func(t *testing.T) { + s, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + + d := Downloader{store: s} + + offloadedLiteral := &core.Literal{ + Value: &core.Literal_OffloadedMetadata{ + OffloadedMetadata: &core.LiteralOffloadedMetadata{ + Uri: "s3://container/offloaded", + }, + }, + } + + inputs := &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "input1": offloadedLiteral, + }, + } + + // Mock reading the offloaded metadata + err = s.WriteProtobuf(context.Background(), storage.DataReference("s3://container/offloaded"), storage.Options{}, &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_StringValue{ + StringValue: "string1", + }, + }, + }, + }, + }, + }, + { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_StringValue{ + StringValue: "string2", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }) + assert.NoError(t, err) + + toPath := "./inputs" + defer func() { + err := os.RemoveAll(toPath) + if err != nil { + t.Errorf("Failed to delete directory: %v", err) + } + }() + + varMap, lMap, err := d.RecursiveDownload(context.Background(), inputs, toPath, true) + assert.NoError(t, err) + assert.NotNil(t, varMap) + assert.NotNil(t, lMap) + assert.Equal(t, []interface{}{"string1", "string2"}, varMap["input1"]) + // Check if files were created and data written + for _, file := range []string{"0", "1"} { + if _, err := os.Stat(filepath.Join(toPath, "input1", file)); os.IsNotExist(err) { + t.Errorf("expected file %s to exist", file) + } + } + }) + + t.Run("OffloadedMetadataContainsMapOfStringString", func(t *testing.T) { + s, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + + d := Downloader{store: s} + + offloadedLiteral := &core.Literal{ + Value: &core.Literal_OffloadedMetadata{ + 
OffloadedMetadata: &core.LiteralOffloadedMetadata{ + Uri: "s3://container/offloaded", + }, + }, + } + + inputs := &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "input1": offloadedLiteral, + }, + } + + // Mock reading the offloaded metadata + err = s.WriteProtobuf(context.Background(), storage.DataReference("s3://container/offloaded"), storage.Options{}, &core.Literal{ + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "key1": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_StringValue{ + StringValue: "value1", + }, + }, + }, + }, + }, + }, + "key2": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_StringValue{ + StringValue: "value2", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }) + assert.NoError(t, err) + + toPath := "./inputs" + defer func() { + err := os.RemoveAll(toPath) + if err != nil { + t.Errorf("Failed to delete directory: %v", err) + } + }() + + varMap, lMap, err := d.RecursiveDownload(context.Background(), inputs, toPath, true) + assert.NoError(t, err) + assert.NotNil(t, varMap) + assert.NotNil(t, lMap) + assert.Equal(t, "value1", varMap["input1"].(VarMap)["key1"]) + assert.Equal(t, "value2", varMap["input1"].(VarMap)["key2"]) + + for _, file := range []string{"key1", "key2"} { + if _, err := os.Stat(filepath.Join(toPath, "input1", file)); os.IsNotExist(err) { + t.Errorf("expected file %s to exist", file) + } + } + }) +} diff --git a/flytecopilot/data/upload.go b/flytecopilot/data/upload.go index 6cb3831b4c..2103028d28 100644 --- a/flytecopilot/data/upload.go +++ b/flytecopilot/data/upload.go @@ -135,8 +135,8 @@ func (u Uploader) RecursiveUpload(ctx context.Context, vars *core.VariableMap, f return errors.Errorf("User Error: %s", string(b)) } - varFutures := make(map[string]futures.Future, len(vars.Variables)) - for varName, variable := range vars.Variables { + varFutures := make(map[string]futures.Future, len(vars.GetVariables())) + for varName, variable := range vars.GetVariables() { varPath := path.Join(fromPath, varName) varType := variable.GetType() switch varType.GetType().(type) { diff --git a/flytecopilot/data/upload_test.go b/flytecopilot/data/upload_test.go index a98595a918..0d51d3c8df 100644 --- a/flytecopilot/data/upload_test.go +++ b/flytecopilot/data/upload_test.go @@ -3,7 +3,7 @@ package data import ( "context" "fmt" - "io/ioutil" + "io" "os" "path" "testing" @@ -21,7 +21,7 @@ func TestUploader_RecursiveUpload(t *testing.T) { tmpPrefix := "upload_test" t.Run("upload-blob", func(t *testing.T) { - tmpDir, err := ioutil.TempDir(tmpFolderLocation, tmpPrefix) + tmpDir, err := os.MkdirTemp(tmpFolderLocation, tmpPrefix) assert.NoError(t, err) defer func() { assert.NoError(t, os.RemoveAll(tmpDir)) @@ -36,7 +36,7 @@ func TestUploader_RecursiveUpload(t *testing.T) { } data := []byte("data") - assert.NoError(t, ioutil.WriteFile(path.Join(tmpDir, "x"), data, os.ModePerm)) + assert.NoError(t, os.WriteFile(path.Join(tmpDir, "x"), data, os.ModePerm)) // #nosec G306 fmt.Printf("Written to %s ", path.Join(tmpDir, "x")) store, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) @@ -49,15 +49,15 @@ func TestUploader_RecursiveUpload(t *testing.T) { outputs := &core.LiteralMap{} assert.NoError(t, store.ReadProtobuf(context.TODO(), outputRef, outputs)) - assert.Len(t, outputs.Literals, 1) - 
assert.NotNil(t, outputs.Literals["x"]) - assert.NotNil(t, outputs.Literals["x"].GetScalar()) - assert.NotNil(t, outputs.Literals["x"].GetScalar().GetBlob()) - ref := storage.DataReference(outputs.Literals["x"].GetScalar().GetBlob().GetUri()) + assert.Len(t, outputs.GetLiterals(), 1) + assert.NotNil(t, outputs.GetLiterals()["x"]) + assert.NotNil(t, outputs.GetLiterals()["x"].GetScalar()) + assert.NotNil(t, outputs.GetLiterals()["x"].GetScalar().GetBlob()) + ref := storage.DataReference(outputs.GetLiterals()["x"].GetScalar().GetBlob().GetUri()) r, err := store.ReadRaw(context.TODO(), ref) assert.NoError(t, err, "%s does not exist", ref) defer r.Close() - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) assert.NoError(t, err) assert.Equal(t, string(data), string(b), "content dont match") }) diff --git a/flytecopilot/data/utils_test.go b/flytecopilot/data/utils_test.go index 56cc3cc426..517f9d30ef 100644 --- a/flytecopilot/data/utils_test.go +++ b/flytecopilot/data/utils_test.go @@ -3,7 +3,6 @@ package data import ( "bytes" "context" - "io/ioutil" "os" "path" "testing" @@ -19,7 +18,7 @@ func TestIsFileReadable(t *testing.T) { tmpFolderLocation := "" tmpPrefix := "util_test" - tmpDir, err := ioutil.TempDir(tmpFolderLocation, tmpPrefix) + tmpDir, err := os.MkdirTemp(tmpFolderLocation, tmpPrefix) assert.NoError(t, err) defer func() { assert.NoError(t, os.RemoveAll(tmpDir)) @@ -30,7 +29,7 @@ func TestIsFileReadable(t *testing.T) { assert.Empty(t, f) assert.Nil(t, i) - assert.NoError(t, ioutil.WriteFile(p, []byte("data"), os.ModePerm)) + assert.NoError(t, os.WriteFile(p, []byte("data"), os.ModePerm)) // #nosec G306 f, i, err = IsFileReadable(p, false) assert.NoError(t, err) assert.Equal(t, p, f) @@ -42,7 +41,7 @@ func TestIsFileReadable(t *testing.T) { _, _, err = IsFileReadable(noExt, false) assert.Error(t, err) - assert.NoError(t, ioutil.WriteFile(p, []byte("data"), os.ModePerm)) + assert.NoError(t, os.WriteFile(p, []byte("data"), os.ModePerm)) // #nosec G306 _, _, err = IsFileReadable(noExt, false) assert.Error(t, err) @@ -57,7 +56,7 @@ func TestUploadFile(t *testing.T) { tmpFolderLocation := "" tmpPrefix := "util_test" - tmpDir, err := ioutil.TempDir(tmpFolderLocation, tmpPrefix) + tmpDir, err := os.MkdirTemp(tmpFolderLocation, tmpPrefix) assert.NoError(t, err) defer func() { assert.NoError(t, os.RemoveAll(tmpDir)) @@ -66,7 +65,7 @@ func TestUploadFile(t *testing.T) { exist := path.Join(tmpDir, "exist-file") data := []byte("data") l := int64(len(data)) - assert.NoError(t, ioutil.WriteFile(exist, data, os.ModePerm)) + assert.NoError(t, os.WriteFile(exist, data, os.ModePerm)) // #nosec G306 nonExist := path.Join(tmpDir, "non-exist-file") store, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) diff --git a/flytecopilot/go.mod b/flytecopilot/go.mod index d943bb5153..a8071b5a8a 100644 --- a/flytecopilot/go.mod +++ b/flytecopilot/go.mod @@ -82,6 +82,7 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.53.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/shamaton/msgpack/v2 v2.2.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.4.1 // indirect diff --git a/flytecopilot/go.sum b/flytecopilot/go.sum index b1f65b79e1..8f33fe7002 100644 --- a/flytecopilot/go.sum +++ b/flytecopilot/go.sum @@ -309,6 +309,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR 
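These test changes are part of the io/ioutil migration: since Go 1.16 the ioutil helpers are deprecated wrappers, so ioutil.TempDir, ioutil.WriteFile and ioutil.ReadAll become os.MkdirTemp, os.WriteFile and io.ReadAll. A minimal sketch of the replacements:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	tmpDir, err := os.MkdirTemp("", "upload_test") // was: ioutil.TempDir
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)

	p := filepath.Join(tmpDir, "x")
	if err := os.WriteFile(p, []byte("data"), 0o644); err != nil { // was: ioutil.WriteFile
		panic(err)
	}

	r := io.NopCloser(strings.NewReader("data")) // stands in for store.ReadRaw
	defer r.Close()
	b, err := io.ReadAll(r) // was: ioutil.ReadAll
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```

The // #nosec G306 annotations alongside the os.WriteFile calls in the patch suppress gosec's file-permission rule, which flags the permissive os.ModePerm mode that these tests keep on purpose.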
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shamaton/msgpack/v2 v2.2.2 h1:GOIg0c9LV04VwzOOqZSrmsv/JzjNOOMxnS/HvOHGdgs= +github.com/shamaton/msgpack/v2 v2.2.2/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= diff --git a/flytectl/.golangci.yml b/flytectl/.golangci.yml index b7e8525336..12de11bf56 100644 --- a/flytectl/.golangci.yml +++ b/flytectl/.golangci.yml @@ -1,37 +1,30 @@ -# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. -# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: -# -# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst - run: skip-dirs: - pkg/client - linters: disable-all: true enable: - - deadcode - errcheck - - gas + - gosec - gci - goconst - goimports - - golint - gosimple - govet - ineffassign - misspell - nakedret - staticcheck - - structcheck - typecheck - unconvert - unparam - unused - - varcheck + - protogetter linters-settings: gci: skip-generated: true + goconst: + ignore-tests: true issues: exclude: - copylocks diff --git a/flytectl/cmd/compile/compile.go b/flytectl/cmd/compile/compile.go index 22c4796e1d..7c5adcf56f 100644 --- a/flytectl/cmd/compile/compile.go +++ b/flytectl/cmd/compile/compile.go @@ -61,16 +61,16 @@ func compileFromPackage(packagePath string) error { case *admin.TaskSpec: tasks = append(tasks, v) case *admin.WorkflowSpec: - workflows[v.Template.Id.Name] = v + workflows[v.GetTemplate().GetId().GetName()] = v case *admin.LaunchPlan: - plans[v.Id.Name] = v + plans[v.GetId().GetName()] = v } } // compile tasks taskTemplates := []*core.TaskTemplate{} for _, task := range tasks { - taskTemplates = append(taskTemplates, task.Template) + taskTemplates = append(taskTemplates, task.GetTemplate()) } fmt.Println("\nCompiling tasks...") @@ -107,13 +107,13 @@ func handleWorkflow( compiledLaunchPlanProviders []common.InterfaceProvider, plans map[string]*admin.LaunchPlan, workflows map[string]*admin.WorkflowSpec) ([]common.InterfaceProvider, error) { - reqs, _ := compiler.GetRequirements(workflow.Template, workflow.SubWorkflows) - wfName := workflow.Template.Id.Name + reqs, _ := compiler.GetRequirements(workflow.GetTemplate(), workflow.GetSubWorkflows()) + wfName := workflow.GetTemplate().GetId().GetName() // Check if all the subworkflows referenced by launchplan are compiled for i := range reqs.GetRequiredLaunchPlanIds() { lpID := reqs.GetRequiredLaunchPlanIds()[i] - lpWfName := plans[lpID.Name].Spec.WorkflowId.Name + lpWfName := plans[lpID.GetName()].GetSpec().GetWorkflowId().GetName() missingWorkflow := workflows[lpWfName] if compiledWorkflows[lpWfName] == nil { // Recursively compile the missing workflow first @@ -127,8 +127,8 @@ func handleWorkflow( fmt.Println("\nCompiling workflow:", wfName) - wf, err := compiler.CompileWorkflow(workflow.Template, - workflow.SubWorkflows, + wf, err := compiler.CompileWorkflow(workflow.GetTemplate(), + workflow.GetSubWorkflows(), compiledTasks, compiledLaunchPlanProviders) @@ -140,11 +140,11 @@ 
func handleWorkflow( // Update the expected inputs and outputs for the launchplans which reference this workflow for _, plan := range plans { - if plan.Spec.WorkflowId.Name == wfName { - plan.Closure.ExpectedOutputs = wf.Primary.Template.Interface.Outputs + if plan.GetSpec().GetWorkflowId().GetName() == wfName { + plan.Closure.ExpectedOutputs = wf.GetPrimary().GetTemplate().GetInterface().GetOutputs() newMap := make(map[string]*core.Parameter) - for key, value := range wf.Primary.Template.Interface.Inputs.Variables { + for key, value := range wf.GetPrimary().GetTemplate().GetInterface().GetInputs().GetVariables() { newMap[key] = &core.Parameter{ Var: value, } diff --git a/flytectl/cmd/config/subcommand/project/project_config.go b/flytectl/cmd/config/subcommand/project/project_config.go index d76030faa7..fd71628448 100644 --- a/flytectl/cmd/config/subcommand/project/project_config.go +++ b/flytectl/cmd/config/subcommand/project/project_config.go @@ -73,18 +73,18 @@ func (c *ConfigProject) GetProjectSpec(cf *config.Config) (*admin.Project, error } project := cf.Project - if len(projectSpec.Id) == 0 && len(project) == 0 { - err := fmt.Errorf(clierrors.ErrProjectNotPassed) + if len(projectSpec.GetId()) == 0 && len(project) == 0 { + err := fmt.Errorf(clierrors.ErrProjectNotPassed) //nolint return nil, err } - if len(projectSpec.Id) > 0 && len(project) > 0 { - err := fmt.Errorf(clierrors.ErrProjectIDBothPassed) + if len(projectSpec.GetId()) > 0 && len(project) > 0 { + err := fmt.Errorf(clierrors.ErrProjectIDBothPassed) //nolint return nil, err } // Get projectId from file, if not provided, fall back to project - if len(projectSpec.Id) == 0 { + if len(projectSpec.GetId()) == 0 { projectSpec.Id = project } return &projectSpec, nil @@ -104,7 +104,7 @@ func (c *ConfigProject) MapToAdminState() (admin.Project_ProjectState, error) { if activate || archive { if activate == archive { - return admin.Project_ACTIVE, fmt.Errorf(clierrors.ErrInvalidStateUpdate) + return admin.Project_ACTIVE, fmt.Errorf(clierrors.ErrInvalidStateUpdate) //nolint } if archive { return admin.Project_ARCHIVED, nil diff --git a/flytectl/cmd/config/subcommand/project/project_config_test.go b/flytectl/cmd/config/subcommand/project/project_config_test.go index bbaf521375..b111cace09 100644 --- a/flytectl/cmd/config/subcommand/project/project_config_test.go +++ b/flytectl/cmd/config/subcommand/project/project_config_test.go @@ -20,7 +20,7 @@ func TestGetProjectSpec(t *testing.T) { } response, err := c.GetProjectSpec(cf) assert.Nil(t, err) - assert.Equal(t, "flytesnacks1", response.Id) + assert.Equal(t, "flytesnacks1", response.GetId()) }) t.Run("Error if project and ID both exist", func(t *testing.T) { @@ -38,8 +38,8 @@ func TestGetProjectSpec(t *testing.T) { } response, err := c.GetProjectSpec(&config.Config{}) assert.Nil(t, err) - assert.Equal(t, "flytesnacks", response.Name) - assert.Equal(t, "flytesnacks test", response.Description) + assert.Equal(t, "flytesnacks", response.GetName()) + assert.Equal(t, "flytesnacks test", response.GetDescription()) }) } diff --git a/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go b/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go index d8fd83f043..fbf43964bd 100644 --- a/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go +++ b/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go @@ -33,7 +33,7 @@ func (g PDWGetterCommandLine) GetDomain() string { // GetWorkflow returns the first argument from the commandline func (g PDWGetterCommandLine) 
GetWorkflow() string { - if g.Args == nil || len(g.Args) == 0 { + if len(g.Args) == 0 { return "" } return g.Args[0] diff --git a/flytectl/cmd/config/subcommand/sandbox/config_flags.go b/flytectl/cmd/config/subcommand/sandbox/config_flags.go index 32e1423057..4359b5db3c 100755 --- a/flytectl/cmd/config/subcommand/sandbox/config_flags.go +++ b/flytectl/cmd/config/subcommand/sandbox/config_flags.go @@ -62,5 +62,6 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.BoolVar(&DefaultConfig.Dev, fmt.Sprintf("%v%v", prefix, "dev"), DefaultConfig.Dev, "Optional. Only start minio and postgres in the sandbox.") cmdFlags.BoolVar(&DefaultConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultConfig.DryRun, "Optional. Only print the docker commands to bring up flyte sandbox/demo container.This will still call github api's to get the latest flyte release to use'") cmdFlags.BoolVar(&DefaultConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultConfig.Force, "Optional. Forcefully delete existing sandbox cluster if it exists.") + cmdFlags.StringVar(&DefaultConfig.Port, fmt.Sprintf("%v%v", prefix, "port"), DefaultConfig.Port, "Optional. Specify the port for the Kubernetes in the sandbox.") return cmdFlags } diff --git a/flytectl/cmd/config/subcommand/sandbox/config_flags_test.go b/flytectl/cmd/config/subcommand/sandbox/config_flags_test.go index 8519a75583..436cdad43a 100755 --- a/flytectl/cmd/config/subcommand/sandbox/config_flags_test.go +++ b/flytectl/cmd/config/subcommand/sandbox/config_flags_test.go @@ -265,4 +265,18 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_port", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("port", testValue) + if vString, err := cmdFlags.GetString("port"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Port) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) } diff --git a/flytectl/cmd/config/subcommand/sandbox/sandbox_config.go b/flytectl/cmd/config/subcommand/sandbox/sandbox_config.go index 47ae4918d5..a8d097ea46 100644 --- a/flytectl/cmd/config/subcommand/sandbox/sandbox_config.go +++ b/flytectl/cmd/config/subcommand/sandbox/sandbox_config.go @@ -1,6 +1,10 @@ package sandbox -import "github.com/flyteorg/flyte/flytectl/pkg/docker" +import ( + "fmt" + + "github.com/flyteorg/flyte/flytectl/pkg/docker" +) // Config holds configuration flags for sandbox command. type Config struct { @@ -36,9 +40,18 @@ type Config struct { DryRun bool `json:"dryRun" pflag:",Optional. Only print the docker commands to bring up flyte sandbox/demo container.This will still call github api's to get the latest flyte release to use'"` Force bool `json:"force" pflag:",Optional. Forcefully delete existing sandbox cluster if it exists."` + + // Allow user to specify the port for the sandbox + Port string `json:"port" pflag:",Optional. 
Specify the port for the Kubernetes in the sandbox."` } //go:generate pflags Config --default-var DefaultConfig --bind-default-var var ( - DefaultConfig = &Config{} + DefaultConfig = &Config{ + Port: "6443", // Default port for the sandbox + } ) + +func (c Config) GetK8sEndpoint() string { + return fmt.Sprintf("https://127.0.0.1:%s", c.Port) +} diff --git a/flytectl/cmd/create/execution.go b/flytectl/cmd/create/execution.go index 5da311357b..ff8b57fea8 100644 --- a/flytectl/cmd/create/execution.go +++ b/flytectl/cmd/create/execution.go @@ -260,7 +260,7 @@ func createExecutionCommand(ctx context.Context, args []string, cmdCtx cmdCore.C if _err != nil { return _err } - fmt.Printf("execution identifier %v\n", exec.Id) + fmt.Printf("execution identifier %v\n", exec.GetId()) } return nil } diff --git a/flytectl/cmd/create/execution_test.go b/flytectl/cmd/create/execution_test.go index d01b683e02..59fb2b0213 100644 --- a/flytectl/cmd/create/execution_test.go +++ b/flytectl/cmd/create/execution_test.go @@ -264,7 +264,7 @@ func (s *createSuite) Test_CreateRelaunchExecution() { Name: "f652ea3596e7f4d80a0e", }, } - executionConfig.Relaunch = relaunchExecResponse.Id.Name + executionConfig.Relaunch = relaunchExecResponse.GetId().GetName() relaunchRequest := &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Name: executionConfig.Relaunch, diff --git a/flytectl/cmd/create/execution_util.go b/flytectl/cmd/create/execution_util.go index bcb5c5639f..4b5813ec1e 100644 --- a/flytectl/cmd/create/execution_util.go +++ b/flytectl/cmd/create/execution_util.go @@ -51,7 +51,7 @@ func createExecutionRequestForWorkflow(ctx context.Context, workflowName, projec } } - return createExecutionRequest(lp.Id, inputs, envs, securityContext, authRole, targetExecName, executionConfig.TargetExecutionCluster), nil + return createExecutionRequest(lp.GetId(), inputs, envs, securityContext, authRole, targetExecName, executionConfig.TargetExecutionCluster), nil } func createExecutionRequestForTask(ctx context.Context, taskName string, project string, domain string, @@ -95,8 +95,8 @@ func createExecutionRequestForTask(ctx context.Context, taskName string, project ResourceType: core.ResourceType_TASK, Project: project, Domain: domain, - Name: task.Id.Name, - Version: task.Id.Version, + Name: task.GetId().GetName(), + Version: task.GetId().GetVersion(), } return createExecutionRequest(id, inputs, envs, securityContext, authRole, targetExecName, executionConfig.TargetExecutionCluster), nil @@ -120,7 +120,7 @@ func relaunchExecution(ctx context.Context, executionName string, project string if err != nil { return err } - fmt.Printf("execution identifier %v\n", relaunchedExec.Id) + fmt.Printf("execution identifier %v\n", relaunchedExec.GetId()) return nil } @@ -141,7 +141,7 @@ func recoverExecution(ctx context.Context, executionName string, project string, if err != nil { return err } - fmt.Printf("execution identifier %v\n", recoveredExec.Id) + fmt.Printf("execution identifier %v\n", recoveredExec.GetId()) return nil } diff --git a/flytectl/cmd/create/execution_util_test.go b/flytectl/cmd/create/execution_util_test.go index e27ba4a96b..c77c1c194b 100644 --- a/flytectl/cmd/create/execution_util_test.go +++ b/flytectl/cmd/create/execution_util_test.go @@ -134,7 +134,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { execCreateRequest, err := createExecutionRequestForWorkflow(s.Ctx, "wfName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfigWithEnvs, "") assert.Nil(t, err) 
assert.NotNil(t, execCreateRequest) - assert.Equal(t, "cluster", execCreateRequest.Spec.ExecutionClusterLabel.Value) + assert.Equal(t, "cluster", execCreateRequest.GetSpec().GetExecutionClusterLabel().GetValue()) }) t.Run("failed literal conversion", func(t *testing.T) { s := testutils.Setup(t) diff --git a/flytectl/cmd/create/project.go b/flytectl/cmd/create/project.go index fa1046a248..777ec7a5db 100644 --- a/flytectl/cmd/create/project.go +++ b/flytectl/cmd/create/project.go @@ -49,11 +49,11 @@ func createProjectsCommand(ctx context.Context, args []string, cmdCtx cmdCore.Co if err != nil { return err } - if projectSpec.Id == "" { - return fmt.Errorf(clierrors.ErrProjectNotPassed) + if projectSpec.GetId() == "" { + return fmt.Errorf(clierrors.ErrProjectNotPassed) //nolint } - if projectSpec.Name == "" { - return fmt.Errorf(clierrors.ErrProjectNameNotPassed) + if projectSpec.GetName() == "" { + return fmt.Errorf(clierrors.ErrProjectNameNotPassed) //nolint } if project.DefaultProjectConfig.DryRun { @@ -61,10 +61,10 @@ func createProjectsCommand(ctx context.Context, args []string, cmdCtx cmdCore.Co } else { _, err := cmdCtx.AdminClient().RegisterProject(ctx, &admin.ProjectRegisterRequest{ Project: &admin.Project{ - Id: projectSpec.Id, - Name: projectSpec.Name, - Description: projectSpec.Description, - Labels: projectSpec.Labels, + Id: projectSpec.GetId(), + Name: projectSpec.GetName(), + Description: projectSpec.GetDescription(), + Labels: projectSpec.GetLabels(), }, }) if err != nil { diff --git a/flytectl/cmd/demo/demo.go b/flytectl/cmd/demo/demo.go index 12dcb66fc3..72f0a07ef8 100644 --- a/flytectl/cmd/demo/demo.go +++ b/flytectl/cmd/demo/demo.go @@ -6,11 +6,6 @@ import ( "github.com/spf13/cobra" ) -const ( - flyteNs = "flyte" - K8sEndpoint = "https://127.0.0.1:6443" -) - // Long descriptions are whitespace sensitive when generating docs using sphinx. const ( demoShort = `Helps with demo interactions like start, teardown, status, and exec.` diff --git a/flytectl/cmd/demo/reload.go b/flytectl/cmd/demo/reload.go index dee3086e35..92f06d77df 100644 --- a/flytectl/cmd/demo/reload.go +++ b/flytectl/cmd/demo/reload.go @@ -4,16 +4,14 @@ import ( "context" "fmt" + sandboxCmdConfig "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/sandbox" cmdCore "github.com/flyteorg/flyte/flytectl/cmd/core" "github.com/flyteorg/flyte/flytectl/pkg/docker" - "github.com/flyteorg/flyte/flytectl/pkg/k8s" - "github.com/flyteorg/flyte/flytestdlib/logger" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/flyteorg/flyte/flytectl/pkg/sandbox" ) const ( internalBootstrapAgent = "flyte-sandbox-bootstrap" - labelSelector = "app.kubernetes.io/name=flyte-binary" ) const ( reloadShort = "Power cycle the Flyte executable pod, effectively picking up an updated config." 
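// Illustrative sketch, not part of this diff: with the hardcoded K8sEndpoint
// constant removed from demo.go, callers are expected to derive the endpoint
// from the sandbox config, whose new Port field defaults to "6443" and can be
// overridden with --port, roughly:
//
//	cfg := sandboxCmdConfig.DefaultConfig   // Port is "6443" unless --port was set
//	endpoint := cfg.GetK8sEndpoint()        // e.g. "https://127.0.0.1:6443"
//	k8sClient, err := k8s.GetK8sClient(docker.Kubeconfig, endpoint)
//
// which lets the relocated sandbox.LegacyReloadDemoCluster honor a
// non-default API server port instead of a fixed constant.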
@@ -73,7 +71,7 @@ func reloadDemoCluster(ctx context.Context, args []string, cmdCtx cmdCore.Comman return err } if useLegacyMethod { - return legacyReloadDemoCluster(ctx) + return sandbox.LegacyReloadDemoCluster(ctx, sandboxCmdConfig.DefaultConfig) } // At this point we know that we are on a modern sandbox, and we can use the @@ -88,32 +86,3 @@ func reloadDemoCluster(ctx context.Context, args []string, cmdCtx cmdCore.Comman return nil } - -// legacyReloadDemoCluster will kill the flyte binary pod so the new one can pick up a new config file -func legacyReloadDemoCluster(ctx context.Context) error { - k8sClient, err := k8s.GetK8sClient(docker.Kubeconfig, K8sEndpoint) - if err != nil { - fmt.Println("Could not get K8s client") - return err - } - pi := k8sClient.CoreV1().Pods(flyteNs) - podList, err := pi.List(ctx, v1.ListOptions{LabelSelector: labelSelector}) - if err != nil { - fmt.Println("could not list pods") - return err - } - if len(podList.Items) != 1 { - return fmt.Errorf("should only have one pod running, %d found, %v", len(podList.Items), podList.Items) - } - logger.Debugf(ctx, "Found %d pods\n", len(podList.Items)) - var grace = int64(0) - err = pi.Delete(ctx, podList.Items[0].Name, v1.DeleteOptions{ - GracePeriodSeconds: &grace, - }) - if err != nil { - fmt.Printf("Could not delete Flyte pod, old configuration may still be in effect. Err: %s\n", err) - return err - } - - return nil -} diff --git a/flytectl/cmd/demo/start.go b/flytectl/cmd/demo/start.go index fa3de39101..234d203ca3 100644 --- a/flytectl/cmd/demo/start.go +++ b/flytectl/cmd/demo/start.go @@ -20,6 +20,11 @@ Starts the demo cluster without any source code: flytectl demo start +Starts the demo cluster with different port: +:: + + flytectl demo start --port 6443 + Runs a dev cluster, which only has minio and postgres pod. :: diff --git a/flytectl/cmd/get/execution.go b/flytectl/cmd/get/execution.go index 8bfcc36e00..dca51dd9a3 100644 --- a/flytectl/cmd/get/execution.go +++ b/flytectl/cmd/get/execution.go @@ -118,7 +118,7 @@ func getCallBack(ctx context.Context, cmdCtx cmdCore.CommandContext) bubbletea.D if err != nil { return nil, err } - return ExecutionToProtoMessages(executionList.Executions), nil + return ExecutionToProtoMessages(executionList.GetExecutions()), nil } } @@ -160,7 +160,7 @@ func getExecutionFunc(ctx context.Context, args []string, cmdCtx cmdCore.Command if err != nil { return err } - logger.Infof(ctx, "Retrieved %v executions", len(executionList.Executions)) + logger.Infof(ctx, "Retrieved %v executions", len(executionList.GetExecutions())) return adminPrinter.Print(config.GetConfig().MustOutputFormat(), executionColumns, - ExecutionToProtoMessages(executionList.Executions)...) + ExecutionToProtoMessages(executionList.GetExecutions())...) 
} diff --git a/flytectl/cmd/get/execution_util.go b/flytectl/cmd/get/execution_util.go index 45214a7ac8..ea99872f9b 100644 --- a/flytectl/cmd/get/execution_util.go +++ b/flytectl/cmd/get/execution_util.go @@ -43,7 +43,7 @@ func WriteExecConfigToFile(executionConfig ExecutionConfig, fileName string) err func CreateAndWriteExecConfigForTask(task *admin.Task, fileName string) error { var err error - executionConfig := ExecutionConfig{Task: task.Id.Name, Version: task.Id.Version} + executionConfig := ExecutionConfig{Task: task.GetId().GetName(), Version: task.GetId().GetVersion()} if executionConfig.Inputs, err = ParamMapForTask(task); err != nil { return err } @@ -52,7 +52,7 @@ func CreateAndWriteExecConfigForTask(task *admin.Task, fileName string) error { func CreateAndWriteExecConfigForWorkflow(wlp *admin.LaunchPlan, fileName string) error { var err error - executionConfig := ExecutionConfig{Workflow: wlp.Id.Name, Version: wlp.Id.Version} + executionConfig := ExecutionConfig{Workflow: wlp.GetId().GetName(), Version: wlp.GetId().GetVersion()} if executionConfig.Inputs, err = ParamMapForWorkflow(wlp); err != nil { return err } @@ -61,31 +61,31 @@ func CreateAndWriteExecConfigForWorkflow(wlp *admin.LaunchPlan, fileName string) func TaskInputs(task *admin.Task) map[string]*core.Variable { taskInputs := map[string]*core.Variable{} - if task == nil || task.Closure == nil { + if task == nil || task.GetClosure() == nil { return taskInputs } - if task.Closure.CompiledTask == nil { + if task.GetClosure().GetCompiledTask() == nil { return taskInputs } - if task.Closure.CompiledTask.Template == nil { + if task.GetClosure().GetCompiledTask().GetTemplate() == nil { return taskInputs } - if task.Closure.CompiledTask.Template.Interface == nil { + if task.GetClosure().GetCompiledTask().GetTemplate().GetInterface() == nil { return taskInputs } - if task.Closure.CompiledTask.Template.Interface.Inputs == nil { + if task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs() == nil { return taskInputs } - return task.Closure.CompiledTask.Template.Interface.Inputs.Variables + return task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs().GetVariables() } func ParamMapForTask(task *admin.Task) (map[string]yaml.Node, error) { taskInputs := TaskInputs(task) paramMap := make(map[string]yaml.Node, len(taskInputs)) for k, v := range taskInputs { - varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.Type) + varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.GetType()) if err != nil { - fmt.Println("error creating default value for literal type ", v.Type) + fmt.Println("error creating default value for literal type ", v.GetType()) return nil, err } var nativeLiteral interface{} @@ -93,11 +93,11 @@ func ParamMapForTask(task *admin.Task) (map[string]yaml.Node, error) { return nil, err } - if k == v.Description { + if k == v.GetDescription() { // a: # a isn't very helpful paramMap[k], err = getCommentedYamlNode(nativeLiteral, "") } else { - paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.Description) + paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.GetDescription()) } if err != nil { return nil, err @@ -108,22 +108,22 @@ func ParamMapForTask(task *admin.Task) (map[string]yaml.Node, error) { func WorkflowParams(lp *admin.LaunchPlan) map[string]*core.Parameter { workflowParams := map[string]*core.Parameter{} - if lp == nil || lp.Spec == nil { + if lp == nil || lp.GetSpec() == nil { return workflowParams } - if lp.Spec.DefaultInputs == nil { + if 
lp.GetSpec().GetDefaultInputs() == nil { return workflowParams } - return lp.Spec.DefaultInputs.Parameters + return lp.GetSpec().GetDefaultInputs().GetParameters() } func ParamMapForWorkflow(lp *admin.LaunchPlan) (map[string]yaml.Node, error) { workflowParams := WorkflowParams(lp) paramMap := make(map[string]yaml.Node, len(workflowParams)) for k, v := range workflowParams { - varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.Var.Type) + varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.GetVar().GetType()) if err != nil { - fmt.Println("error creating default value for literal type ", v.Var.Type) + fmt.Println("error creating default value for literal type ", v.GetVar().GetType()) return nil, err } var nativeLiteral interface{} @@ -131,16 +131,16 @@ func ParamMapForWorkflow(lp *admin.LaunchPlan) (map[string]yaml.Node, error) { return nil, err } // Override if there is a default value - if paramsDefault, ok := v.Behavior.(*core.Parameter_Default); ok { + if paramsDefault, ok := v.GetBehavior().(*core.Parameter_Default); ok { if nativeLiteral, err = coreutils.ExtractFromLiteral(paramsDefault.Default); err != nil { return nil, err } } - if k == v.Var.Description { + if k == v.GetVar().GetDescription() { // a: # a isn't very helpful paramMap[k], err = getCommentedYamlNode(nativeLiteral, "") } else { - paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.Var.Description) + paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.GetVar().GetDescription()) } if err != nil { diff --git a/flytectl/cmd/get/execution_util_test.go b/flytectl/cmd/get/execution_util_test.go index 2c081950f2..35b7160efb 100644 --- a/flytectl/cmd/get/execution_util_test.go +++ b/flytectl/cmd/get/execution_util_test.go @@ -18,7 +18,7 @@ func TestTaskInputs(t *testing.T) { t.Run("valid inputs", func(t *testing.T) { task := createTask() retValue := TaskInputs(task) - assert.Equal(t, task.Closure.CompiledTask.Template.Interface.Inputs.Variables, retValue) + assert.Equal(t, task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs().GetVariables(), retValue) }) t.Run("closure compiled task nil", func(t *testing.T) { task := createTask() diff --git a/flytectl/cmd/get/launch_plan.go b/flytectl/cmd/get/launch_plan.go index 79cede32c5..aaf1eacc52 100644 --- a/flytectl/cmd/get/launch_plan.go +++ b/flytectl/cmd/get/launch_plan.go @@ -152,12 +152,12 @@ func LaunchplanToTableProtoMessages(l []*admin.LaunchPlan) []proto.Message { messages := make([]proto.Message, 0, len(l)) for _, m := range l { m := proto.Clone(m).(*admin.LaunchPlan) - if m.Closure != nil { - if m.Closure.ExpectedInputs != nil && m.Closure.ExpectedInputs.Parameters != nil { - printer.FormatParameterDescriptions(m.Closure.ExpectedInputs.Parameters) + if m.GetClosure() != nil { + if m.GetClosure().GetExpectedInputs() != nil && m.Closure.ExpectedInputs.Parameters != nil { + printer.FormatParameterDescriptions(m.GetClosure().GetExpectedInputs().GetParameters()) } - if m.Closure.ExpectedOutputs != nil && m.Closure.ExpectedOutputs.Variables != nil { - printer.FormatVariableDescriptions(m.Closure.ExpectedOutputs.Variables) + if m.GetClosure().GetExpectedOutputs() != nil && m.Closure.ExpectedOutputs.Variables != nil { + printer.FormatVariableDescriptions(m.GetClosure().GetExpectedOutputs().GetVariables()) } } messages = append(messages, m) diff --git a/flytectl/cmd/get/launch_plan_test.go b/flytectl/cmd/get/launch_plan_test.go index 7b1359b7ec..64e1e99c09 100644 --- a/flytectl/cmd/get/launch_plan_test.go +++ 
b/flytectl/cmd/get/launch_plan_test.go @@ -275,7 +275,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { func TestGetLaunchPlanFunc(t *testing.T) { s := testutils.Setup(t) getLaunchPlanSetup() - s.FetcherExt.OnFetchAllVerOfLPMatch(mock.Anything, mock.Anything, "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) + s.FetcherExt.OnFetchAllVerOfLPMatch(mock.Anything, mock.Anything, "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.GetLaunchPlans(), nil) err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) assert.Nil(t, err) s.FetcherExt.AssertCalled(t, "FetchAllVerOfLP", s.Ctx, "launchplan1", "dummyProject", "dummyDomain", launchplan.DefaultConfig.Filter) @@ -308,7 +308,7 @@ func TestGetLaunchPlans(t *testing.T) { t.Run("no workflow filter", func(t *testing.T) { s := testutils.Setup(t) getLaunchPlanSetup() - s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) + s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.GetLaunchPlans(), nil) argsLp = []string{} err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) assert.Nil(t, err) @@ -319,7 +319,7 @@ func TestGetLaunchPlans(t *testing.T) { getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{ FieldSelector: "workflow.name=workflow2", - }).Return(launchPlanListResponse.LaunchPlans, nil) + }).Return(launchPlanListResponse.GetLaunchPlans(), nil) argsLp = []string{} launchplan.DefaultConfig.Workflow = "workflow2" err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) @@ -379,7 +379,7 @@ func TestGetLaunchPlanTableFunc(t *testing.T) { s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceGetRequest).Return(launchPlanListResponse, nil) s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan2, nil) s.MockAdminClient.OnListLaunchPlanIdsMatch(s.Ctx, namedIDRequest).Return(namedIdentifierList, nil) - s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) + s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.GetLaunchPlans(), nil) config.GetConfig().Output = printer.OutputFormatTABLE.String() err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) assert.Nil(t, err) diff --git a/flytectl/cmd/get/node_execution.go b/flytectl/cmd/get/node_execution.go index 89c902ddbd..2ebe23df1e 100644 --- a/flytectl/cmd/get/node_execution.go +++ b/flytectl/cmd/get/node_execution.go @@ -110,7 +110,7 @@ func getExecutionDetails(ctx context.Context, project, domain, execName, nodeNam } sort.Slice(nExecDetailsForView[:], func(i, j int) bool { - return nExecDetailsForView[i].NodeExec.Closure.CreatedAt.AsTime().Before(nExecDetailsForView[j].NodeExec.Closure.CreatedAt.AsTime()) + return nExecDetailsForView[i].NodeExec.Closure.GetCreatedAt().AsTime().Before(nExecDetailsForView[j].NodeExec.Closure.GetCreatedAt().AsTime()) }) return nExecDetailsForView, nil @@ -125,49 +125,49 @@ func getNodeExecDetailsInt(ctx context.Context, project, domain, execName, nodeN } var nodeExecClosures []*NodeExecutionClosure - for _, nodeExec := range nExecDetails.NodeExecutions { + for _, nodeExec := range nExecDetails.GetNodeExecutions() { nodeExecClosure := &NodeExecutionClosure{ NodeExec: &NodeExecution{nodeExec}, } 
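// Illustrative note, not part of this diff: the sweep from direct field access
// to generated Get*() accessors in this file (and throughout the PR) is what
// the newly enabled protogetter linter asks for, and it is also nil-safe,
// because protoc-gen-go emits getters of roughly this shape:
//
//	func (x *Task) GetClosure() *TaskClosure {
//		if x != nil {
//			return x.Closure
//		}
//		return nil
//	}
//
// so a chain such as nodeExec.GetMetadata().GetIsParentNode() yields the zero
// value rather than panicking when an intermediate message is nil.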
nodeExecClosures = append(nodeExecClosures, nodeExecClosure) // Check if this is parent node. If yes do recursive call to get child nodes. - if nodeExec.Metadata != nil && nodeExec.Metadata.IsParentNode { - nodeExecClosure.ChildNodes, err = getNodeExecDetailsInt(ctx, project, domain, execName, nodeName, nodeExec.Id.NodeId, nodeExecDetailsMap, cmdCtx) + if nodeExec.GetMetadata() != nil && nodeExec.GetMetadata().GetIsParentNode() { + nodeExecClosure.ChildNodes, err = getNodeExecDetailsInt(ctx, project, domain, execName, nodeName, nodeExec.GetId().GetNodeId(), nodeExecDetailsMap, cmdCtx) if err != nil { return nil, err } } else { taskExecList, err := cmdCtx.AdminFetcherExt().FetchTaskExecutionsOnNode(ctx, - nodeExec.Id.NodeId, execName, project, domain) + nodeExec.GetId().GetNodeId(), execName, project, domain) if err != nil { return nil, err } - for _, taskExec := range taskExecList.TaskExecutions { + for _, taskExec := range taskExecList.GetTaskExecutions() { taskExecClosure := &TaskExecutionClosure{ TaskExecution: &TaskExecution{taskExec}, } nodeExecClosure.TaskExecutions = append(nodeExecClosure.TaskExecutions, taskExecClosure) } // Fetch the node inputs and outputs - nExecDataResp, err := cmdCtx.AdminFetcherExt().FetchNodeExecutionData(ctx, nodeExec.Id.NodeId, execName, project, domain) + nExecDataResp, err := cmdCtx.AdminFetcherExt().FetchNodeExecutionData(ctx, nodeExec.GetId().GetNodeId(), execName, project, domain) if err != nil { return nil, err } // Extract the inputs from the literal map - nodeExecClosure.Inputs, err = extractLiteralMap(nExecDataResp.FullInputs) + nodeExecClosure.Inputs, err = extractLiteralMap(nExecDataResp.GetFullInputs()) if err != nil { return nil, err } // Extract the outputs from the literal map - nodeExecClosure.Outputs, err = extractLiteralMap(nExecDataResp.FullOutputs) + nodeExecClosure.Outputs, err = extractLiteralMap(nExecDataResp.GetFullOutputs()) if err != nil { return nil, err } } - nodeExecDetailsMap[nodeExec.Id.NodeId] = nodeExecClosure + nodeExecDetailsMap[nodeExec.GetId().GetNodeId()] = nodeExecClosure // Found the node - if len(nodeName) > 0 && nodeName == nodeExec.Id.NodeId { + if len(nodeName) > 0 && nodeName == nodeExec.GetId().GetNodeId() { return nodeExecClosures, err } } @@ -183,38 +183,38 @@ func createNodeTaskExecTreeView(rootView gotree.Tree, taskExecClosures []*TaskEx } // TODO: Replace this by filter to sort in the admin sort.Slice(taskExecClosures[:], func(i, j int) bool { - return taskExecClosures[i].Id.RetryAttempt < taskExecClosures[j].Id.RetryAttempt + return taskExecClosures[i].Id.GetRetryAttempt() < taskExecClosures[j].Id.GetRetryAttempt() }) for _, taskExecClosure := range taskExecClosures { - attemptView := rootView.Add(taskAttemptPrefix + strconv.Itoa(int(taskExecClosure.Id.RetryAttempt))) - attemptView.Add(taskExecPrefix + taskExecClosure.Closure.Phase.String() + - hyphenPrefix + taskExecClosure.Closure.CreatedAt.AsTime().String() + - hyphenPrefix + taskExecClosure.Closure.UpdatedAt.AsTime().String()) - attemptView.Add(taskTypePrefix + taskExecClosure.Closure.TaskType) - attemptView.Add(taskReasonPrefix + taskExecClosure.Closure.Reason) - if taskExecClosure.Closure.Metadata != nil { + attemptView := rootView.Add(taskAttemptPrefix + strconv.Itoa(int(taskExecClosure.Id.GetRetryAttempt()))) + attemptView.Add(taskExecPrefix + taskExecClosure.Closure.GetPhase().String() + + hyphenPrefix + taskExecClosure.Closure.GetCreatedAt().AsTime().String() + + hyphenPrefix + taskExecClosure.Closure.GetUpdatedAt().AsTime().String()) + 
attemptView.Add(taskTypePrefix + taskExecClosure.Closure.GetTaskType()) + attemptView.Add(taskReasonPrefix + taskExecClosure.Closure.GetReason()) + if taskExecClosure.Closure.GetMetadata() != nil { metadata := attemptView.Add(taskMetadataPrefix) - metadata.Add(taskGeneratedNamePrefix + taskExecClosure.Closure.Metadata.GeneratedName) - metadata.Add(taskPluginIDPrefix + taskExecClosure.Closure.Metadata.PluginIdentifier) + metadata.Add(taskGeneratedNamePrefix + taskExecClosure.Closure.GetMetadata().GetGeneratedName()) + metadata.Add(taskPluginIDPrefix + taskExecClosure.Closure.GetMetadata().GetPluginIdentifier()) extResourcesView := metadata.Add(taskExtResourcesPrefix) - for _, extResource := range taskExecClosure.Closure.Metadata.ExternalResources { - extResourcesView.Add(taskExtResourcePrefix + extResource.ExternalId) + for _, extResource := range taskExecClosure.Closure.GetMetadata().GetExternalResources() { + extResourcesView.Add(taskExtResourcePrefix + extResource.GetExternalId()) } resourcePoolInfoView := metadata.Add(taskResourcePrefix) - for _, rsPool := range taskExecClosure.Closure.Metadata.ResourcePoolInfo { - resourcePoolInfoView.Add(taskExtResourcePrefix + rsPool.Namespace) - resourcePoolInfoView.Add(taskExtResourceTokenPrefix + rsPool.AllocationToken) + for _, rsPool := range taskExecClosure.Closure.GetMetadata().GetResourcePoolInfo() { + resourcePoolInfoView.Add(taskExtResourcePrefix + rsPool.GetNamespace()) + resourcePoolInfoView.Add(taskExtResourceTokenPrefix + rsPool.GetAllocationToken()) } } - sort.Slice(taskExecClosure.Closure.Logs[:], func(i, j int) bool { - return taskExecClosure.Closure.Logs[i].Name < taskExecClosure.Closure.Logs[j].Name + sort.Slice(taskExecClosure.Closure.GetLogs()[:], func(i, j int) bool { + return taskExecClosure.Closure.GetLogs()[i].GetName() < taskExecClosure.Closure.GetLogs()[j].GetName() }) logsView := attemptView.Add(taskLogsPrefix) - for _, logData := range taskExecClosure.Closure.Logs { - logsView.Add(taskLogsNamePrefix + logData.Name) - logsView.Add(taskLogURIPrefix + logData.Uri) + for _, logData := range taskExecClosure.Closure.GetLogs() { + logsView.Add(taskLogsNamePrefix + logData.GetName()) + logsView.Add(taskLogURIPrefix + logData.GetUri()) } } } @@ -228,13 +228,13 @@ func createNodeDetailsTreeView(rootView gotree.Tree, nodeExecutionClosures []*No } // TODO : Move to sorting using filters. 
sort.Slice(nodeExecutionClosures[:], func(i, j int) bool { - return nodeExecutionClosures[i].NodeExec.Closure.CreatedAt.AsTime().Before(nodeExecutionClosures[j].NodeExec.Closure.CreatedAt.AsTime()) + return nodeExecutionClosures[i].NodeExec.Closure.GetCreatedAt().AsTime().Before(nodeExecutionClosures[j].NodeExec.Closure.GetCreatedAt().AsTime()) }) for _, nodeExecWrapper := range nodeExecutionClosures { - nExecView := rootView.Add(nodeExecWrapper.NodeExec.Id.NodeId + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.Phase.String() + - hyphenPrefix + nodeExecWrapper.NodeExec.Closure.CreatedAt.AsTime().String() + - hyphenPrefix + nodeExecWrapper.NodeExec.Closure.UpdatedAt.AsTime().String()) + nExecView := rootView.Add(nodeExecWrapper.NodeExec.Id.GetNodeId() + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.GetPhase().String() + + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.GetCreatedAt().AsTime().String() + + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.GetUpdatedAt().AsTime().String()) if len(nodeExecWrapper.ChildNodes) > 0 { createNodeDetailsTreeView(nExecView, nodeExecWrapper.ChildNodes) } @@ -254,7 +254,7 @@ func extractLiteralMap(literalMap *core.LiteralMap) (map[string]interface{}, err if literalMap == nil || literalMap.Literals == nil { return m, nil } - for key, literalVal := range literalMap.Literals { + for key, literalVal := range literalMap.GetLiterals() { extractedLiteralVal, err := coreutils.ExtractFromLiteral(literalVal) if err != nil { return nil, err diff --git a/flytectl/cmd/get/project.go b/flytectl/cmd/get/project.go index 96b68c56fb..3d1bd87ce2 100644 --- a/flytectl/cmd/get/project.go +++ b/flytectl/cmd/get/project.go @@ -84,9 +84,9 @@ func getProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandC if len(args) == 1 { id := args[0] - logger.Debugf(ctx, "Retrieved %v projects", len(projects.Projects)) - for _, v := range projects.Projects { - if v.Id == id { + logger.Debugf(ctx, "Retrieved %v projects", len(projects.GetProjects())) + for _, v := range projects.GetProjects() { + if v.GetId() == id { err := adminPrinter.Print(config.GetConfig().MustOutputFormat(), projectColumns, v) if err != nil { return err @@ -97,6 +97,6 @@ func getProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandC return nil } - logger.Debugf(ctx, "Retrieved %v projects", len(projects.Projects)) - return adminPrinter.Print(config.GetConfig().MustOutputFormat(), projectColumns, ProjectToProtoMessages(projects.Projects)...) + logger.Debugf(ctx, "Retrieved %v projects", len(projects.GetProjects())) + return adminPrinter.Print(config.GetConfig().MustOutputFormat(), projectColumns, ProjectToProtoMessages(projects.GetProjects())...) 
} diff --git a/flytectl/cmd/get/task.go b/flytectl/cmd/get/task.go index 0b050f5546..fcbb00b684 100644 --- a/flytectl/cmd/get/task.go +++ b/flytectl/cmd/get/task.go @@ -125,14 +125,14 @@ func TaskToTableProtoMessages(l []*admin.Task) []proto.Message { messages := make([]proto.Message, 0, len(l)) for _, m := range l { m := proto.Clone(m).(*admin.Task) - if m.Closure != nil && m.Closure.CompiledTask != nil { - if m.Closure.CompiledTask.Template != nil { - if m.Closure.CompiledTask.Template.Interface != nil { - if m.Closure.CompiledTask.Template.Interface.Inputs != nil && m.Closure.CompiledTask.Template.Interface.Inputs.Variables != nil { - printer.FormatVariableDescriptions(m.Closure.CompiledTask.Template.Interface.Inputs.Variables) + if m.GetClosure() != nil && m.GetClosure().GetCompiledTask() != nil { + if m.GetClosure().GetCompiledTask().GetTemplate() != nil { + if m.GetClosure().GetCompiledTask().GetTemplate().GetInterface() != nil { + if m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs() != nil && m.Closure.CompiledTask.Template.Interface.Inputs.Variables != nil { + printer.FormatVariableDescriptions(m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs().GetVariables()) } - if m.Closure.CompiledTask.Template.Interface.Outputs != nil && m.Closure.CompiledTask.Template.Interface.Outputs.Variables != nil { - printer.FormatVariableDescriptions(m.Closure.CompiledTask.Template.Interface.Outputs.Variables) + if m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetOutputs() != nil && m.Closure.CompiledTask.Template.Interface.Outputs.Variables != nil { + printer.FormatVariableDescriptions(m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetOutputs().GetVariables()) } } } diff --git a/flytectl/cmd/get/task_test.go b/flytectl/cmd/get/task_test.go index d0f817fd1e..e5c2dafc94 100644 --- a/flytectl/cmd/get/task_test.go +++ b/flytectl/cmd/get/task_test.go @@ -244,7 +244,7 @@ func TestGetTaskFunc(t *testing.T) { s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) s.FetcherExt.OnFetchAllVerOfTaskMatch(mock.Anything, mock.Anything, mock.Anything, - mock.Anything, mock.Anything).Return(taskListResponse.Tasks, nil) + mock.Anything, mock.Anything).Return(taskListResponse.GetTasks(), nil) err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) assert.Nil(t, err) s.FetcherExt.AssertCalled(t, "FetchAllVerOfTask", s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}) @@ -329,7 +329,7 @@ func TestGetTaskFuncWithTable(t *testing.T) { taskConfig.DefaultConfig.Filter = filters.Filters{} s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) - s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.Tasks, nil) + s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.GetTasks(), nil) config.GetConfig().Output = "table" err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) assert.Nil(t, err) @@ -455,7 +455,7 @@ func TestGetTasks(t *testing.T) { taskConfig.DefaultConfig.Filter = filters.Filters{} s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) - 
s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.Tasks, nil) + s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.GetTasks(), nil) err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) assert.Nil(t, err) @@ -471,8 +471,8 @@ func TestGetTasksFilters(t *testing.T) { } s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListFilterRequestTask).Return(taskListFilterResponse, nil) filteredTasks := []*admin.Task{} - for _, task := range taskListResponse.Tasks { - if task.Id.Name == "task1" && task.Id.Version == "v1" { + for _, task := range taskListResponse.GetTasks() { + if task.GetId().GetName() == "task1" && task.GetId().GetVersion() == "v1" { filteredTasks = append(filteredTasks, task) } } diff --git a/flytectl/cmd/get/workflow.go b/flytectl/cmd/get/workflow.go index 624e8d2ba8..ce6dc4db8d 100644 --- a/flytectl/cmd/get/workflow.go +++ b/flytectl/cmd/get/workflow.go @@ -129,15 +129,15 @@ func WorkflowToTableProtoMessages(l []*admin.Workflow) []proto.Message { messages := make([]proto.Message, 0, len(l)) for _, m := range l { m := proto.Clone(m).(*admin.Workflow) - if m.Closure != nil && m.Closure.CompiledWorkflow != nil { - if m.Closure.CompiledWorkflow.Primary != nil { - if m.Closure.CompiledWorkflow.Primary.Template != nil { - if m.Closure.CompiledWorkflow.Primary.Template.Interface != nil { - if m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs != nil && m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables != nil { - printer.FormatVariableDescriptions(m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables) + if m.GetClosure() != nil && m.GetClosure().GetCompiledWorkflow() != nil { + if m.GetClosure().GetCompiledWorkflow().GetPrimary() != nil { + if m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate() != nil { + if m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface() != nil { + if m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs() != nil && m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables != nil { + printer.FormatVariableDescriptions(m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs().GetVariables()) } - if m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs != nil && m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs.Variables != nil { - printer.FormatVariableDescriptions(m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs.Variables) + if m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetOutputs() != nil && m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs.Variables != nil { + printer.FormatVariableDescriptions(m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetOutputs().GetVariables()) } } } diff --git a/flytectl/cmd/register/register_util.go b/flytectl/cmd/register/register_util.go index b7b419e611..4ef1bab1c1 100644 --- a/flytectl/cmd/register/register_util.go +++ b/flytectl/cmd/register/register_util.go @@ -171,10 +171,10 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command ResourceType: core.ResourceType_LAUNCH_PLAN, Project: config.GetConfig().Project, Domain: config.GetConfig().Domain, - Name: launchPlan.Id.Name, - Version: launchPlan.Id.Version, + Name: launchPlan.GetId().GetName(), + Version: launchPlan.GetId().GetVersion(), }, - 
Spec: launchPlan.Spec, + Spec: launchPlan.GetSpec(), }) if err != nil { return err @@ -185,8 +185,8 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command Id: &core.Identifier{ Project: config.GetConfig().Project, Domain: config.GetConfig().Domain, - Name: launchPlan.Id.Name, - Version: launchPlan.Id.Version, + Name: launchPlan.GetId().GetName(), + Version: launchPlan.GetId().GetVersion(), }, State: admin.LaunchPlanState_ACTIVE, }) @@ -205,8 +205,8 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command ResourceType: core.ResourceType_WORKFLOW, Project: config.GetConfig().Project, Domain: config.GetConfig().Domain, - Name: workflowSpec.Template.Id.Name, - Version: workflowSpec.Template.Id.Version, + Name: workflowSpec.GetTemplate().GetId().GetName(), + Version: workflowSpec.GetTemplate().GetId().GetVersion(), }, Spec: workflowSpec, }) @@ -223,8 +223,8 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command ResourceType: core.ResourceType_TASK, Project: config.GetConfig().Project, Domain: config.GetConfig().Domain, - Name: taskSpec.Template.Id.Name, - Version: taskSpec.Template.Id.Version, + Name: taskSpec.GetTemplate().GetId().GetName(), + Version: taskSpec.GetTemplate().GetId().GetVersion(), }, Spec: taskSpec, }) @@ -235,39 +235,39 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command } func hydrateNode(node *core.Node, version string, force bool) error { - targetNode := node.Target + targetNode := node.GetTarget() switch v := targetNode.(type) { case *core.Node_TaskNode: taskNodeWrapper := targetNode.(*core.Node_TaskNode) - taskNodeReference := taskNodeWrapper.TaskNode.Reference.(*core.TaskNode_ReferenceId) + taskNodeReference := taskNodeWrapper.TaskNode.GetReference().(*core.TaskNode_ReferenceId) hydrateIdentifier(taskNodeReference.ReferenceId, version, force) case *core.Node_WorkflowNode: workflowNodeWrapper := targetNode.(*core.Node_WorkflowNode) - switch workflowNodeWrapper.WorkflowNode.Reference.(type) { + switch workflowNodeWrapper.WorkflowNode.GetReference().(type) { case *core.WorkflowNode_SubWorkflowRef: - subWorkflowNodeReference := workflowNodeWrapper.WorkflowNode.Reference.(*core.WorkflowNode_SubWorkflowRef) + subWorkflowNodeReference := workflowNodeWrapper.WorkflowNode.GetReference().(*core.WorkflowNode_SubWorkflowRef) hydrateIdentifier(subWorkflowNodeReference.SubWorkflowRef, version, force) case *core.WorkflowNode_LaunchplanRef: - launchPlanNodeReference := workflowNodeWrapper.WorkflowNode.Reference.(*core.WorkflowNode_LaunchplanRef) + launchPlanNodeReference := workflowNodeWrapper.WorkflowNode.GetReference().(*core.WorkflowNode_LaunchplanRef) hydrateIdentifier(launchPlanNodeReference.LaunchplanRef, version, force) default: - return fmt.Errorf("unknown type %T", workflowNodeWrapper.WorkflowNode.Reference) + return fmt.Errorf("unknown type %T", workflowNodeWrapper.WorkflowNode.GetReference()) } case *core.Node_BranchNode: branchNodeWrapper := targetNode.(*core.Node_BranchNode) - if err := hydrateNode(branchNodeWrapper.BranchNode.IfElse.Case.ThenNode, version, force); err != nil { + if err := hydrateNode(branchNodeWrapper.BranchNode.GetIfElse().GetCase().GetThenNode(), version, force); err != nil { return fmt.Errorf("failed to hydrateNode") } - if len(branchNodeWrapper.BranchNode.IfElse.Other) > 0 { - for _, ifBlock := range branchNodeWrapper.BranchNode.IfElse.Other { - if err := hydrateNode(ifBlock.ThenNode, version, force); err != nil { + if 
len(branchNodeWrapper.BranchNode.GetIfElse().GetOther()) > 0 { + for _, ifBlock := range branchNodeWrapper.BranchNode.GetIfElse().GetOther() { + if err := hydrateNode(ifBlock.GetThenNode(), version, force); err != nil { return fmt.Errorf("failed to hydrateNode") } } } - switch branchNodeWrapper.BranchNode.IfElse.Default.(type) { + switch branchNodeWrapper.BranchNode.GetIfElse().GetDefault().(type) { case *core.IfElseBlock_ElseNode: - elseNodeReference := branchNodeWrapper.BranchNode.IfElse.Default.(*core.IfElseBlock_ElseNode) + elseNodeReference := branchNodeWrapper.BranchNode.GetIfElse().GetDefault().(*core.IfElseBlock_ElseNode) if err := hydrateNode(elseNodeReference.ElseNode, version, force); err != nil { return fmt.Errorf("failed to hydrateNode") } @@ -275,12 +275,12 @@ func hydrateNode(node *core.Node, version string, force bool) error { case *core.IfElseBlock_Error: // Do nothing. default: - return fmt.Errorf("unknown type %T", branchNodeWrapper.BranchNode.IfElse.Default) + return fmt.Errorf("unknown type %T", branchNodeWrapper.BranchNode.GetIfElse().GetDefault()) } case *core.Node_GateNode: // Do nothing. case *core.Node_ArrayNode: - if err := hydrateNode(v.ArrayNode.Node, version, force); err != nil { + if err := hydrateNode(v.ArrayNode.GetNode(), version, force); err != nil { return fmt.Errorf("failed to hydrateNode") } default: @@ -290,33 +290,33 @@ func hydrateNode(node *core.Node, version string, force bool) error { } func hydrateIdentifier(identifier *core.Identifier, version string, force bool) { - if identifier.Project == "" || identifier.Project == registrationProjectPattern { + if identifier.GetProject() == "" || identifier.GetProject() == registrationProjectPattern { identifier.Project = config.GetConfig().Project } - if identifier.Domain == "" || identifier.Domain == registrationDomainPattern { + if identifier.GetDomain() == "" || identifier.GetDomain() == registrationDomainPattern { identifier.Domain = config.GetConfig().Domain } - if force || identifier.Version == "" || identifier.Version == registrationVersionPattern { + if force || identifier.GetVersion() == "" || identifier.GetVersion() == registrationVersionPattern { identifier.Version = version } } func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataReference, destinationDir string) error { - if task.Template.GetContainer() != nil { - for k := range task.Template.GetContainer().Args { - if task.Template.GetContainer().Args[k] == registrationRemotePackagePattern { + if task.GetTemplate().GetContainer() != nil { + for k := range task.GetTemplate().GetContainer().GetArgs() { + if task.GetTemplate().GetContainer().GetArgs()[k] == registrationRemotePackagePattern { task.Template.GetContainer().Args[k] = sourceUploadedLocation.String() } - if task.Template.GetContainer().Args[k] == registrationDestDirPattern { + if task.GetTemplate().GetContainer().GetArgs()[k] == registrationDestDirPattern { task.Template.GetContainer().Args[k] = "." 
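// Illustrative sketch, not part of this diff (the bucket path and destination
// dir below are hypothetical): during fast registration the serialized
// container args carry placeholder tokens, and hydrateTaskSpec rewrites them
// in place:
//
//	err := hydrateTaskSpec(task, storage.DataReference("s3://bucket/fast/abc.tar.gz"), "/root")
//	// registrationRemotePackagePattern -> "s3://bucket/fast/abc.tar.gz"
//	// registrationDestDirPattern       -> "/root" (or "." when no destination dir is given)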
if len(destinationDir) > 0 { task.Template.GetContainer().Args[k] = destinationDir } } } - } else if task.Template.GetK8SPod() != nil && task.Template.GetK8SPod().PodSpec != nil { + } else if task.GetTemplate().GetK8SPod() != nil && task.GetTemplate().GetK8SPod().GetPodSpec() != nil { var podSpec = v1.PodSpec{} - err := utils.UnmarshalStructToObj(task.Template.GetK8SPod().PodSpec, &podSpec) + err := utils.UnmarshalStructToObj(task.GetTemplate().GetK8SPod().GetPodSpec(), &podSpec) if err != nil { return err } @@ -339,9 +339,9 @@ func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataRe } task.Template.Target = &core.TaskTemplate_K8SPod{ K8SPod: &core.K8SPod{ - Metadata: task.Template.GetK8SPod().Metadata, + Metadata: task.GetTemplate().GetK8SPod().GetMetadata(), PodSpec: podSpecStruct, - DataConfig: task.Template.GetK8SPod().DataConfig, + DataConfig: task.GetTemplate().GetK8SPod().GetDataConfig(), }, } } @@ -349,15 +349,15 @@ func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataRe } func validateLPWithSchedule(lpSpec *admin.LaunchPlanSpec, wf *admin.Workflow) error { - schedule := lpSpec.EntityMetadata.Schedule + schedule := lpSpec.GetEntityMetadata().GetSchedule() var scheduleRequiredParams []string - if wf != nil && wf.Closure != nil && wf.Closure.CompiledWorkflow != nil && - wf.Closure.CompiledWorkflow.Primary != nil && wf.Closure.CompiledWorkflow.Primary.Template != nil && - wf.Closure.CompiledWorkflow.Primary.Template.Interface != nil && - wf.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs != nil { - variables := wf.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables + if wf != nil && wf.GetClosure() != nil && wf.GetClosure().GetCompiledWorkflow() != nil && + wf.GetClosure().GetCompiledWorkflow().GetPrimary() != nil && wf.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate() != nil && + wf.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface() != nil && + wf.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs() != nil { + variables := wf.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs().GetVariables() for varName := range variables { - if varName != schedule.KickoffTimeInputArg { + if varName != schedule.GetKickoffTimeInputArg() { scheduleRequiredParams = append(scheduleRequiredParams, varName) } } @@ -366,16 +366,16 @@ func validateLPWithSchedule(lpSpec *admin.LaunchPlanSpec, wf *admin.Workflow) er // Either the scheduled param should have default or fixed values var scheduleParamsWithValues []string // Check for default values - if lpSpec.DefaultInputs != nil { - for paramName, paramValue := range lpSpec.DefaultInputs.Parameters { - if paramName != schedule.KickoffTimeInputArg && paramValue.GetDefault() != nil { + if lpSpec.GetDefaultInputs() != nil { + for paramName, paramValue := range lpSpec.GetDefaultInputs().GetParameters() { + if paramName != schedule.GetKickoffTimeInputArg() && paramValue.GetDefault() != nil { scheduleParamsWithValues = append(scheduleParamsWithValues, paramName) } } } // Check for fixed values - if lpSpec.FixedInputs != nil && lpSpec.FixedInputs.Literals != nil { - for fixedLiteralName := range lpSpec.FixedInputs.Literals { + if lpSpec.GetFixedInputs() != nil && lpSpec.FixedInputs.Literals != nil { + for fixedLiteralName := range lpSpec.GetFixedInputs().GetLiterals() { scheduleParamsWithValues = append(scheduleParamsWithValues, fixedLiteralName) } } @@ -389,14 +389,14 @@ func 
validateLPWithSchedule(lpSpec *admin.LaunchPlanSpec, wf *admin.Workflow) er } func validateLaunchSpec(ctx context.Context, lpSpec *admin.LaunchPlanSpec, cmdCtx cmdCore.CommandContext) error { - if lpSpec == nil || lpSpec.WorkflowId == nil || lpSpec.EntityMetadata == nil || - lpSpec.EntityMetadata.Schedule == nil { + if lpSpec == nil || lpSpec.GetWorkflowId() == nil || lpSpec.GetEntityMetadata() == nil || + lpSpec.GetEntityMetadata().GetSchedule() == nil { return nil } // Fetch the workflow spec using the identifier - workflowID := lpSpec.WorkflowId - wf, err := cmdCtx.AdminFetcherExt().FetchWorkflowVersion(ctx, workflowID.Name, workflowID.Version, - workflowID.Project, workflowID.Domain) + workflowID := lpSpec.GetWorkflowId() + wf, err := cmdCtx.AdminFetcherExt().FetchWorkflowVersion(ctx, workflowID.GetName(), workflowID.GetVersion(), + workflowID.GetProject(), workflowID.GetDomain()) if err != nil { return err } @@ -464,7 +464,7 @@ func validateSpec(ctx context.Context, message proto.Message, cmdCtx cmdCore.Com switch v := message.(type) { case *admin.LaunchPlan: launchPlan := v - if err := validateLaunchSpec(ctx, launchPlan.Spec, cmdCtx); err != nil { + if err := validateLaunchSpec(ctx, launchPlan.GetSpec(), cmdCtx); err != nil { return err } } @@ -475,26 +475,26 @@ func hydrateSpec(message proto.Message, uploadLocation storage.DataReference, co switch v := message.(type) { case *admin.LaunchPlan: launchPlan := message.(*admin.LaunchPlan) - hydrateIdentifier(launchPlan.Id, config.Version, config.Force) - hydrateIdentifier(launchPlan.Spec.WorkflowId, config.Version, config.Force) - if err := hydrateLaunchPlanSpec(config.AssumableIamRole, config.K8sServiceAccount, config.OutputLocationPrefix, launchPlan.Spec); err != nil { + hydrateIdentifier(launchPlan.GetId(), config.Version, config.Force) + hydrateIdentifier(launchPlan.GetSpec().GetWorkflowId(), config.Version, config.Force) + if err := hydrateLaunchPlanSpec(config.AssumableIamRole, config.K8sServiceAccount, config.OutputLocationPrefix, launchPlan.GetSpec()); err != nil { return err } case *admin.WorkflowSpec: workflowSpec := message.(*admin.WorkflowSpec) - for _, Noderef := range workflowSpec.Template.Nodes { + for _, Noderef := range workflowSpec.GetTemplate().GetNodes() { if err := hydrateNode(Noderef, config.Version, config.Force); err != nil { return err } } - if workflowSpec.Template.GetFailureNode() != nil { - if err := hydrateNode(workflowSpec.Template.GetFailureNode(), config.Version, config.Force); err != nil { + if workflowSpec.GetTemplate().GetFailureNode() != nil { + if err := hydrateNode(workflowSpec.GetTemplate().GetFailureNode(), config.Version, config.Force); err != nil { return err } } - hydrateIdentifier(workflowSpec.Template.Id, config.Version, config.Force) - for _, subWorkflow := range workflowSpec.SubWorkflows { - for _, Noderef := range subWorkflow.Nodes { + hydrateIdentifier(workflowSpec.GetTemplate().GetId(), config.Version, config.Force) + for _, subWorkflow := range workflowSpec.GetSubWorkflows() { + for _, Noderef := range subWorkflow.GetNodes() { if err := hydrateNode(Noderef, config.Version, config.Force); err != nil { return err } @@ -504,11 +504,11 @@ func hydrateSpec(message proto.Message, uploadLocation storage.DataReference, co return err } } - hydrateIdentifier(subWorkflow.Id, config.Version, config.Force) + hydrateIdentifier(subWorkflow.GetId(), config.Version, config.Force) } case *admin.TaskSpec: taskSpec := message.(*admin.TaskSpec) - hydrateIdentifier(taskSpec.Template.Id, config.Version, 
config.Force) + hydrateIdentifier(taskSpec.GetTemplate().GetId(), config.Version, config.Force) // In case of fast serialize input proto also have on additional variable to substitute i.e destination bucket for source code if err := hydrateTaskSpec(taskSpec, uploadLocation, config.DestinationDirectory); err != nil { return err @@ -607,7 +607,7 @@ func readAndCopyArchive(src io.Reader, tempDir string, unarchivedFiles []string) } } } else if header.Typeflag == tar.TypeReg { - dest, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + dest, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) // #nosec G115 if err != nil { return unarchivedFiles, err } @@ -814,8 +814,8 @@ func uploadFastRegisterArtifact(ctx context.Context, project, domain, sourceCode } } - if resp != nil && len(resp.SignedUrl) > 0 { - return storage.DataReference(resp.NativeUrl), DirectUpload(resp.SignedUrl, h, size, dataRefReaderCloser) + if resp != nil && len(resp.GetSignedUrl()) > 0 { + return storage.DataReference(resp.GetNativeUrl()), DirectUpload(resp.GetSignedUrl(), h, size, dataRefReaderCloser) } dataStore, err := getStorageClient(ctx) diff --git a/flytectl/cmd/register/register_util_test.go b/flytectl/cmd/register/register_util_test.go index e068c0f64a..fbe6e8b6f2 100644 --- a/flytectl/cmd/register/register_util_test.go +++ b/flytectl/cmd/register/register_util_test.go @@ -359,8 +359,8 @@ func TestHydrateLaunchPlanSpec(t *testing.T) { lpSpec := &admin.LaunchPlanSpec{} err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec) assert.Nil(t, err) - assert.Equal(t, &admin.AuthRole{AssumableIamRole: "iamRole"}, lpSpec.AuthRole) - assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole"}}, lpSpec.SecurityContext) + assert.Equal(t, &admin.AuthRole{AssumableIamRole: "iamRole"}, lpSpec.GetAuthRole()) + assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole"}}, lpSpec.GetSecurityContext()) }) t.Run("k8sService account override", func(t *testing.T) { registerFilesSetup() @@ -368,8 +368,8 @@ func TestHydrateLaunchPlanSpec(t *testing.T) { lpSpec := &admin.LaunchPlanSpec{} err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec) assert.Nil(t, err) - assert.Equal(t, &admin.AuthRole{KubernetesServiceAccount: "k8Account"}, lpSpec.AuthRole) - assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{K8SServiceAccount: "k8Account"}}, lpSpec.SecurityContext) + assert.Equal(t, &admin.AuthRole{KubernetesServiceAccount: "k8Account"}, lpSpec.GetAuthRole()) + assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{K8SServiceAccount: "k8Account"}}, lpSpec.GetSecurityContext()) }) t.Run("Both k8sService and IamRole", func(t *testing.T) { registerFilesSetup() @@ -379,8 +379,8 @@ func TestHydrateLaunchPlanSpec(t *testing.T) { err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec) assert.Nil(t, err) assert.Equal(t, &admin.AuthRole{AssumableIamRole: "iamRole", - KubernetesServiceAccount: "k8Account"}, lpSpec.AuthRole) - assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole", K8SServiceAccount: "k8Account"}}, lpSpec.SecurityContext) + KubernetesServiceAccount: 
"k8Account"}, lpSpec.GetAuthRole()) + assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole", K8SServiceAccount: "k8Account"}}, lpSpec.GetSecurityContext()) }) t.Run("Output prefix", func(t *testing.T) { registerFilesSetup() @@ -388,7 +388,7 @@ func TestHydrateLaunchPlanSpec(t *testing.T) { lpSpec := &admin.LaunchPlanSpec{} err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec) assert.Nil(t, err) - assert.Equal(t, &admin.RawOutputDataConfig{OutputLocationPrefix: "prefix"}, lpSpec.RawOutputDataConfig) + assert.Equal(t, &admin.RawOutputDataConfig{OutputLocationPrefix: "prefix"}, lpSpec.GetRawOutputDataConfig()) }) } @@ -648,7 +648,7 @@ func TestHydrateTaskSpec(t *testing.T) { err = hydrateTaskSpec(task, storage.DataReference("file://somewhere"), "sourcey") assert.NoError(t, err) var hydratedPodSpec = v1.PodSpec{} - err = utils.UnmarshalStructToObj(task.Template.GetK8SPod().PodSpec, &hydratedPodSpec) + err = utils.UnmarshalStructToObj(task.GetTemplate().GetK8SPod().GetPodSpec(), &hydratedPodSpec) assert.NoError(t, err) assert.Len(t, hydratedPodSpec.Containers[1].Args, 2) assert.Contains(t, hydratedPodSpec.Containers[1].Args[1], "somewhere") diff --git a/flytectl/cmd/update/execution.go b/flytectl/cmd/update/execution.go index 70f34b342f..efe8e64be7 100644 --- a/flytectl/cmd/update/execution.go +++ b/flytectl/cmd/update/execution.go @@ -36,13 +36,13 @@ func updateExecutionFunc(ctx context.Context, args []string, cmdCtx cmdCore.Comm project := config.GetConfig().Project domain := config.GetConfig().Domain if len(args) != 1 { - return fmt.Errorf(clierrors.ErrExecutionNotPassed) + return fmt.Errorf(clierrors.ErrExecutionNotPassed) //nolint } executionName := args[0] activate := execution.UConfig.Activate archive := execution.UConfig.Archive if activate && archive { - return fmt.Errorf(clierrors.ErrInvalidStateUpdate) + return fmt.Errorf(clierrors.ErrInvalidStateUpdate) //nolint } var newState admin.ExecutionState diff --git a/flytectl/cmd/update/execution_test.go b/flytectl/cmd/update/execution_test.go index fbcb0b02e9..4fde5683a5 100644 --- a/flytectl/cmd/update/execution_test.go +++ b/flytectl/cmd/update/execution_test.go @@ -28,7 +28,7 @@ func TestExecutionCanBeActivated(t *testing.T) { t, "UpdateExecution", s.Ctx, mock.MatchedBy( func(r *admin.ExecutionUpdateRequest) bool { - return r.State == admin.ExecutionState_EXECUTION_ACTIVE + return r.GetState() == admin.ExecutionState_EXECUTION_ACTIVE })) }) } @@ -47,7 +47,7 @@ func TestExecutionCanBeArchived(t *testing.T) { t, "UpdateExecution", s.Ctx, mock.MatchedBy( func(r *admin.ExecutionUpdateRequest) bool { - return r.State == admin.ExecutionState_EXECUTION_ARCHIVED + return r.GetState() == admin.ExecutionState_EXECUTION_ARCHIVED })) }) } @@ -146,7 +146,7 @@ func TestExecutionUpdateFailsWhenExecutionDoesNotExist(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. - OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). + OnFetchExecution(s.Ctx, execution.GetId().GetName(), execution.GetId().GetProject(), execution.GetId().GetDomain()). Return(nil, ext.NewNotFoundError("execution not found")) s.MockAdminClient. OnUpdateExecutionMatch(s.Ctx, mock.Anything). 
@@ -165,7 +165,7 @@ func TestExecutionUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. - OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). + OnFetchExecution(s.Ctx, execution.GetId().GetName(), execution.GetId().GetProject(), execution.GetId().GetDomain()). Return(execution, nil) s.MockAdminClient. OnUpdateExecutionMatch(s.Ctx, mock.Anything). @@ -200,7 +200,7 @@ func testExecutionUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. - OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). + OnFetchExecution(s.Ctx, execution.GetId().GetName(), execution.GetId().GetProject(), execution.GetId().GetDomain()). Return(execution, nil) s.MockAdminClient. OnUpdateExecutionMatch(s.Ctx, mock.Anything). @@ -230,7 +230,7 @@ func testExecutionUpdateWithMockSetup( setup(&s, execution.UConfig, target) } - args := []string{target.Id.Name} + args := []string{target.GetId().GetName()} err := updateExecutionFunc(s.Ctx, args, s.CmdCtx) if asserter != nil { diff --git a/flytectl/cmd/update/launch_plan.go b/flytectl/cmd/update/launch_plan.go index 466551667e..c847e92e6b 100644 --- a/flytectl/cmd/update/launch_plan.go +++ b/flytectl/cmd/update/launch_plan.go @@ -36,12 +36,12 @@ func updateLPFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandCont project := config.GetConfig().Project domain := config.GetConfig().Domain if len(args) != 1 { - return fmt.Errorf(clierrors.ErrLPNotPassed) + return fmt.Errorf(clierrors.ErrLPNotPassed) //nolint } name := args[0] version := launchplan.UConfig.Version if len(version) == 0 { - return fmt.Errorf(clierrors.ErrLPVersionNotPassed) + return fmt.Errorf(clierrors.ErrLPVersionNotPassed) //nolint } activate := launchplan.UConfig.Activate @@ -55,7 +55,7 @@ func updateLPFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandCont deactivate = launchplan.UConfig.Deactivate } if activate == deactivate && deactivate { - return fmt.Errorf(clierrors.ErrInvalidBothStateUpdate) + return fmt.Errorf(clierrors.ErrInvalidBothStateUpdate) //nolint } var newState admin.LaunchPlanState diff --git a/flytectl/cmd/update/launch_plan_meta.go b/flytectl/cmd/update/launch_plan_meta.go index 7b1c93fd85..51b6c6769e 100644 --- a/flytectl/cmd/update/launch_plan_meta.go +++ b/flytectl/cmd/update/launch_plan_meta.go @@ -37,7 +37,7 @@ func getUpdateLPMetaFunc(namedEntityConfig *NamedEntityConfig) func(ctx context. 
project := config.GetConfig().Project domain := config.GetConfig().Domain if len(args) != 1 { - return fmt.Errorf(clierrors.ErrLPNotPassed) + return fmt.Errorf(clierrors.ErrLPNotPassed) //nolint } name := args[0] err := namedEntityConfig.UpdateNamedEntity(ctx, name, project, domain, core.ResourceType_LAUNCH_PLAN, cmdCtx) diff --git a/flytectl/cmd/update/launch_plan_test.go b/flytectl/cmd/update/launch_plan_test.go index 249a810118..4238a205d5 100644 --- a/flytectl/cmd/update/launch_plan_test.go +++ b/flytectl/cmd/update/launch_plan_test.go @@ -28,7 +28,7 @@ func TestLaunchPlanCanBeActivated(t *testing.T) { t, "UpdateLaunchPlan", s.Ctx, mock.MatchedBy( func(r *admin.LaunchPlanUpdateRequest) bool { - return r.State == admin.LaunchPlanState_ACTIVE + return r.GetState() == admin.LaunchPlanState_ACTIVE })) }) } @@ -47,7 +47,7 @@ func TestLaunchPlanCanBeArchived(t *testing.T) { t, "UpdateLaunchPlan", s.Ctx, mock.MatchedBy( func(r *admin.LaunchPlanUpdateRequest) bool { - return r.State == admin.LaunchPlanState_INACTIVE + return r.GetState() == admin.LaunchPlanState_INACTIVE })) }) } @@ -66,7 +66,7 @@ func TestLaunchPlanCanBeDeactivated(t *testing.T) { t, "UpdateLaunchPlan", s.Ctx, mock.MatchedBy( func(r *admin.LaunchPlanUpdateRequest) bool { - return r.State == admin.LaunchPlanState_INACTIVE + return r.GetState() == admin.LaunchPlanState_INACTIVE })) }) } @@ -275,8 +275,8 @@ func testLaunchPlanUpdateWithMockSetup( setup(&s, launchplan.UConfig, target) } - args := []string{target.Id.Name} - launchplan.UConfig.Version = target.Id.Version + args := []string{target.GetId().GetName()} + launchplan.UConfig.Version = target.GetId().GetVersion() err := updateLPFunc(s.Ctx, args, s.CmdCtx) if asserter != nil { diff --git a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go index b7288d6dcc..90d4fca9f7 100644 --- a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go +++ b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go @@ -274,10 +274,10 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_CLUSTER_RESOURCE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -296,10 +296,10 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_CLUSTER_RESOURCE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. 
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -318,10 +318,10 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_CLUSTER_RESOURCE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -342,10 +342,10 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_CLUSTER_RESOURCE). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -363,10 +363,10 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_CLUSTER_RESOURCE). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -384,10 +384,10 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_CLUSTER_RESOURCE). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). 
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -410,10 +410,10 @@ func testWorkflowClusterResourceAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_CLUSTER_RESOURCE). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, setup, @@ -479,10 +479,10 @@ func testProjectClusterResourceAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_CLUSTER_RESOURCE). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, setup, @@ -546,10 +546,10 @@ func testProjectDomainClusterResourceAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_CLUSTER_RESOURCE). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, setup, diff --git a/flytectl/cmd/update/matchable_execution_cluster_label_test.go b/flytectl/cmd/update/matchable_execution_cluster_label_test.go index 1006234626..ba14dbe535 100644 --- a/flytectl/cmd/update/matchable_execution_cluster_label_test.go +++ b/flytectl/cmd/update/matchable_execution_cluster_label_test.go @@ -274,10 +274,10 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). 
Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -296,10 +296,10 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -318,10 +318,10 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -342,10 +342,10 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -363,10 +363,10 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). 
Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -384,10 +384,10 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -410,10 +410,10 @@ func testWorkflowExecutionClusterLabelUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, setup, @@ -475,10 +475,10 @@ func testProjectExecutionClusterLabelUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, setup, @@ -538,10 +538,10 @@ func testProjectDomainExecutionClusterLabelUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). 
Return(nil) }, setup, diff --git a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go index e16526faa6..a240dfdd98 100644 --- a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go +++ b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go @@ -274,10 +274,10 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_QUEUE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -296,10 +296,10 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_QUEUE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -318,10 +318,10 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_QUEUE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -342,10 +342,10 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_QUEUE). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -363,10 +363,10 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_QUEUE). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -384,10 +384,10 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_QUEUE). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -410,10 +410,10 @@ func testWorkflowExecutionQueueAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_QUEUE). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, setup, @@ -479,10 +479,10 @@ func testProjectExecutionQueueAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_QUEUE). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). 
Return(nil) }, setup, @@ -546,10 +546,10 @@ func testProjectDomainExecutionQueueAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_QUEUE). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, setup, diff --git a/flytectl/cmd/update/matchable_plugin_override_test.go b/flytectl/cmd/update/matchable_plugin_override_test.go index 3b0181392b..649619be03 100644 --- a/flytectl/cmd/update/matchable_plugin_override_test.go +++ b/flytectl/cmd/update/matchable_plugin_override_test.go @@ -274,10 +274,10 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -296,10 +296,10 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -318,10 +318,10 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). 
Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -342,10 +342,10 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -363,10 +363,10 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -384,10 +384,10 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -410,10 +410,10 @@ func testWorkflowPluginOverrideUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). 
Return(nil) }, setup, @@ -485,10 +485,10 @@ func testProjectPluginOverrideUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, setup, @@ -558,10 +558,10 @@ func testProjectDomainPluginOverrideUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_PLUGIN_OVERRIDE). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, setup, diff --git a/flytectl/cmd/update/matchable_task_resource_attribute_test.go b/flytectl/cmd/update/matchable_task_resource_attribute_test.go index 42c2c3ab4f..2fffe2b5ec 100644 --- a/flytectl/cmd/update/matchable_task_resource_attribute_test.go +++ b/flytectl/cmd/update/matchable_task_resource_attribute_test.go @@ -274,10 +274,10 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_TASK_RESOURCE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -296,10 +296,10 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_TASK_RESOURCE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -318,10 +318,10 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. 
t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_TASK_RESOURCE). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -342,10 +342,10 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_TASK_RESOURCE). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -363,10 +363,10 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_TASK_RESOURCE). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -384,10 +384,10 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_TASK_RESOURCE). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -410,10 +410,10 @@ func testWorkflowTaskResourceAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. 
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_TASK_RESOURCE). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, setup, @@ -478,10 +478,10 @@ func testProjectTaskResourceAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_TASK_RESOURCE). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, setup, @@ -544,10 +544,10 @@ func testProjectDomainTaskResourceAttributeUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_TASK_RESOURCE). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, setup, diff --git a/flytectl/cmd/update/matchable_workflow_execution_config_test.go b/flytectl/cmd/update/matchable_workflow_execution_config_test.go index c75b2fd58f..e026a91a9b 100644 --- a/flytectl/cmd/update/matchable_workflow_execution_config_test.go +++ b/flytectl/cmd/update/matchable_workflow_execution_config_test.go @@ -274,10 +274,10 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -296,10 +296,10 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). 
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -318,10 +318,10 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(nil, ext.NewNotFoundError("attribute")) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -342,10 +342,10 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { @@ -363,10 +363,10 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { @@ -384,10 +384,10 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). 
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(fmt.Errorf("network error")) }, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { @@ -410,10 +410,10 @@ func testWorkflowExecutionConfigUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. - OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything). Return(nil) }, setup, @@ -482,10 +482,10 @@ func testProjectWorkflowExecutionConfigUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. - OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything). Return(nil) }, setup, @@ -552,10 +552,10 @@ func testProjectDomainWorkflowExecutionConfigUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. - OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) s.UpdaterExt. - OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything). 
Return(nil) }, setup, diff --git a/flytectl/cmd/update/named_entity.go b/flytectl/cmd/update/named_entity.go index 61bbffc705..5e99775e14 100644 --- a/flytectl/cmd/update/named_entity.go +++ b/flytectl/cmd/update/named_entity.go @@ -28,7 +28,7 @@ type NamedEntityConfig struct { func (cfg NamedEntityConfig) UpdateNamedEntity(ctx context.Context, name string, project string, domain string, rsType core.ResourceType, cmdCtx cmdCore.CommandContext) error { if cfg.Activate && cfg.Archive { - return fmt.Errorf(clierrors.ErrInvalidStateUpdate) + return fmt.Errorf(clierrors.ErrInvalidStateUpdate) //nolint } id := &admin.NamedEntityIdentifier{ @@ -45,7 +45,7 @@ func (cfg NamedEntityConfig) UpdateNamedEntity(ctx context.Context, name string, return fmt.Errorf("update metadata for %s: could not fetch metadata: %w", name, err) } - oldMetadata, newMetadata := composeNamedMetadataEdits(cfg, namedEntity.Metadata) + oldMetadata, newMetadata := composeNamedMetadataEdits(cfg, namedEntity.GetMetadata()) patch, err := DiffAsYaml(diffPathBefore, diffPathAfter, oldMetadata, newMetadata) if err != nil { panic(err) @@ -86,15 +86,15 @@ func composeNamedMetadataEdits(config NamedEntityConfig, current *admin.NamedEnt case config.Activate && config.Archive: panic("cannot both activate and archive") case config.Activate: - old.State = current.State + old.State = current.GetState() new.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE case config.Archive: - old.State = current.State + old.State = current.GetState() new.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED } if config.Description != "" { - old.Description = current.Description + old.Description = current.GetDescription() new.Description = config.Description } diff --git a/flytectl/cmd/update/named_entity_test.go b/flytectl/cmd/update/named_entity_test.go index 4d4e5b2783..1f8e28a525 100644 --- a/flytectl/cmd/update/named_entity_test.go +++ b/flytectl/cmd/update/named_entity_test.go @@ -59,7 +59,7 @@ func testNamedEntityUpdateWithMockSetup( updateMetadataFactory := getUpdateMetadataFactory(resourceType) - args := []string{target.Id.Name} + args := []string{target.GetId().GetName()} err := updateMetadataFactory(config)(s.Ctx, args, s.CmdCtx) if asserter != nil { diff --git a/flytectl/cmd/update/project.go b/flytectl/cmd/update/project.go index f6196e35ff..3a779df476 100644 --- a/flytectl/cmd/update/project.go +++ b/flytectl/cmd/update/project.go @@ -103,13 +103,13 @@ func updateProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.Comma return err } - if edits.Id == "" { - return fmt.Errorf(clierrors.ErrProjectNotPassed) + if edits.GetId() == "" { + return fmt.Errorf(clierrors.ErrProjectNotPassed) //nolint } - currentProject, err := cmdCtx.AdminFetcherExt().GetProjectByID(ctx, edits.Id) + currentProject, err := cmdCtx.AdminFetcherExt().GetProjectByID(ctx, edits.GetId()) if err != nil { - return fmt.Errorf("update project %s: could not fetch project: %w", edits.Id, err) + return fmt.Errorf("update project %s: could not fetch project: %w", edits.GetId(), err) } // We do not compare currentProject against edits directly, because edits does not @@ -139,10 +139,10 @@ func updateProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.Comma _, err = cmdCtx.AdminClient().UpdateProject(ctx, edits) if err != nil { - return fmt.Errorf(clierrors.ErrFailedProjectUpdate, edits.Id, err) + return fmt.Errorf(clierrors.ErrFailedProjectUpdate, edits.GetId(), err) } - fmt.Printf("project %s updated\n", edits.Id) + fmt.Printf("project %s updated\n", 
edits.GetId()) return nil } @@ -152,14 +152,14 @@ func updateProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.Comma func copyProjectWithEdits(target *admin.Project, edited *admin.Project, projectConfig *project.ConfigProject) *admin.Project { copy := *target - if edited.Name != "" { - copy.Name = edited.Name + if edited.GetName() != "" { + copy.Name = edited.GetName() } - if edited.Description != "" { - copy.Description = edited.Description + if edited.GetDescription() != "" { + copy.Description = edited.GetDescription() } if len(edited.GetLabels().GetValues()) != 0 { - copy.Labels = edited.Labels + copy.Labels = edited.GetLabels() } // `edited` comes with `admin.Project_ACTIVE` state by default @@ -182,9 +182,9 @@ func copyProjectWithEdits(target *admin.Project, edited *admin.Project, projectC // YAML file input, and the flags for `ConfigProject` would also // be good. if projectConfig.Archive || projectConfig.Activate { - copy.State = edited.State + copy.State = edited.GetState() } else { - edited.State = copy.GetState() } return &copy } diff --git a/flytectl/cmd/update/project_test.go b/flytectl/cmd/update/project_test.go index 0ca41c4309..2451163942 100644 --- a/flytectl/cmd/update/project_test.go +++ b/flytectl/cmd/update/project_test.go @@ -27,7 +27,7 @@ func TestProjectCanBeActivated(t *testing.T) { t, "UpdateProject", s.Ctx, mock.MatchedBy( func(r *admin.Project) bool { - return r.State == admin.Project_ACTIVE + return r.GetState() == admin.Project_ACTIVE })) }) } @@ -46,7 +46,7 @@ func TestProjectCanBeArchived(t *testing.T) { t, "UpdateProject", s.Ctx, mock.MatchedBy( func(r *admin.Project) bool { - return r.State == admin.Project_ARCHIVED + return r.GetState() == admin.Project_ARCHIVED })) }) } @@ -145,7 +145,7 @@ func TestProjectUpdateFailsWhenProjectDoesNotExist(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. - OnGetProjectByID(s.Ctx, project.Id). + OnGetProjectByID(s.Ctx, project.GetId()). Return(nil, ext.NewNotFoundError("project not found")) s.MockAdminClient. OnUpdateProjectMatch(s.Ctx, mock.Anything). @@ -164,7 +164,7 @@ func TestProjectUpdateFailsWhenAdminClientFails(t *testing.T) { t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. - OnGetProjectByID(s.Ctx, project.Id). + OnGetProjectByID(s.Ctx, project.GetId()). Return(project, nil) s.MockAdminClient. OnUpdateProjectMatch(s.Ctx, mock.Anything). @@ -209,7 +209,7 @@ func TestProjectUpdateDoesNotActivateArchivedProject(t *testing.T) { t, "UpdateProject", s.Ctx, mock.MatchedBy( func(r *admin.Project) bool { - return r.State == admin.Project_ARCHIVED + return r.GetState() == admin.Project_ARCHIVED })) }) } @@ -223,7 +223,7 @@ func testProjectUpdate( t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. - OnGetProjectByID(s.Ctx, project.Id). + OnGetProjectByID(s.Ctx, project.GetId()). Return(project, nil) s.MockAdminClient. OnUpdateProjectMatch(s.Ctx, mock.Anything).
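The flytectl and flyteidl changes in this patch consistently replace direct access to generated proto fields (project.Id, r.State, edited.Name) with the generated getters (project.GetId(), r.GetState(), edited.GetName()), which matches the protogetter linter enabled in flyteidl/.golangci.yml further down in the diff. A minimal sketch of the motivation, assuming the standard nil-safe getters that protoc-gen-go emits; the nil *admin.Project below is illustrative only and not code from this patch:

package main

import (
	"fmt"

	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"
)

func main() {
	var p *admin.Project // a nil proto message, e.g. an unset nested field

	// Generated getters are nil-safe: called on a nil receiver they
	// return the zero value, so this prints an empty string.
	fmt.Println(p.GetId())

	// Direct field access on a nil message dereferences a nil pointer
	// and panics at runtime:
	// fmt.Println(p.Id) // panic: invalid memory address or nil pointer dereference
}

Because each getter in a chain tolerates a nil receiver, expressions such as v.GetVar().GetDescription() in flytectl/pkg/printer/printer.go remain safe even when an intermediate message is unset, which is the main reason the linter prefers getters over raw field access.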
@@ -249,7 +249,7 @@ func testProjectUpdateWithMockSetup( } project.DefaultProjectConfig = &project.ConfigProject{ - ID: target.Id, + ID: target.GetId(), } config.GetConfig().Project = "" config.GetConfig().Domain = "" diff --git a/flytectl/cmd/update/task_meta.go b/flytectl/cmd/update/task_meta.go index 3783c2dcfc..8e68778c99 100644 --- a/flytectl/cmd/update/task_meta.go +++ b/flytectl/cmd/update/task_meta.go @@ -37,7 +37,7 @@ func getUpdateTaskFunc(namedEntityConfig *NamedEntityConfig) func(ctx context.Co project := config.GetConfig().Project domain := config.GetConfig().Domain if len(args) != 1 { - return fmt.Errorf(clierrors.ErrTaskNotPassed) + return fmt.Errorf(clierrors.ErrTaskNotPassed) //nolint } name := args[0] diff --git a/flytectl/cmd/update/workflow_meta.go b/flytectl/cmd/update/workflow_meta.go index e2a416e0aa..c6604bfb86 100644 --- a/flytectl/cmd/update/workflow_meta.go +++ b/flytectl/cmd/update/workflow_meta.go @@ -37,7 +37,7 @@ func getUpdateWorkflowFunc(namedEntityConfig *NamedEntityConfig) func(ctx contex project := config.GetConfig().Project domain := config.GetConfig().Domain if len(args) != 1 { - return fmt.Errorf(clierrors.ErrWorkflowNotPassed) + return fmt.Errorf(clierrors.ErrWorkflowNotPassed) //nolint } name := args[0] err := namedEntityConfig.UpdateNamedEntity(ctx, name, project, domain, core.ResourceType_WORKFLOW, cmdCtx) diff --git a/flytectl/cmd/version/version.go b/flytectl/cmd/version/version.go index 88da1330a2..67a28f3531 100644 --- a/flytectl/cmd/version/version.go +++ b/flytectl/cmd/version/version.go @@ -103,9 +103,9 @@ func getControlPlaneVersion(ctx context.Context, cmdCtx cmdCore.CommandContext) } // Print FlyteAdmin if err := printVersion(versionOutput{ - Build: v.ControlPlaneVersion.Build, - BuildTime: v.ControlPlaneVersion.BuildTime, - Version: v.ControlPlaneVersion.Version, + Build: v.GetControlPlaneVersion().GetBuild(), + BuildTime: v.GetControlPlaneVersion().GetBuildTime(), + Version: v.GetControlPlaneVersion().GetVersion(), App: controlPlanAppName, }); err != nil { return fmt.Errorf("Unable to get the control plane version. 
Please try again: %v", err) diff --git a/flytectl/go.mod b/flytectl/go.mod index b657a02d4d..a8b0fe8bb2 100644 --- a/flytectl/go.mod +++ b/flytectl/go.mod @@ -141,6 +141,7 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect github.com/shamaton/msgpack/v2 v2.2.2 // indirect github.com/spf13/afero v1.9.2 // indirect github.com/spf13/cast v1.4.1 // indirect @@ -148,6 +149,12 @@ require ( github.com/spf13/viper v1.11.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.2.0 // indirect + github.com/tidwall/gjson v1.17.1 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + github.com/wI2L/jsondiff v0.6.0 // indirect + gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect diff --git a/flytectl/go.sum b/flytectl/go.sum index f57ca65c0a..9f81c0ec9d 100644 --- a/flytectl/go.sum +++ b/flytectl/go.sum @@ -418,6 +418,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/shamaton/msgpack/v2 v2.2.2 h1:GOIg0c9LV04VwzOOqZSrmsv/JzjNOOMxnS/HvOHGdgs= github.com/shamaton/msgpack/v2 v2.2.2/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -457,6 +459,18 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/wI2L/jsondiff v0.6.0 h1:zrsH3FbfVa3JO9llxrcDy/XLkYPLgoMX6Mz3T2PP2AI= +github.com/wI2L/jsondiff v0.6.0/go.mod 
h1:D6aQ5gKgPF9g17j+E9N7aasmU1O+XvfmWm1y8UMmNpw= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -466,6 +480,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zalando/go-keyring v0.1.1 h1:w2V9lcx/Uj4l+dzAf1m9s+DJ1O8ROkEHnynonHjTcYE= github.com/zalando/go-keyring v0.1.1/go.mod h1:OIC+OZ28XbmwFxU/Rp9V7eKzZjamBJwRzC8UFJH9+L8= +gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7 h1:BAkxmYRc1ZPl6Gap4HWqwPT8yLZMrgaAwx12Ft408sg= +gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7/go.mod h1:X40Z1OU8o1oiXWzBmkuYOaruzYGv60l0AxGiB0E9keI= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= diff --git a/flytectl/pkg/bubbletea/bubbletea_pagination.go b/flytectl/pkg/bubbletea/bubbletea_pagination.go index 01a4b7ea98..bc76aaec1c 100644 --- a/flytectl/pkg/bubbletea/bubbletea_pagination.go +++ b/flytectl/pkg/bubbletea/bubbletea_pagination.go @@ -147,7 +147,7 @@ func Paginator(_listHeader []printer.Column, _callback DataCallback, _filter fil listHeader = _listHeader callback = _callback filter = _filter - filter.Page = int32(_max(int(filter.Page), 1)) + filter.Page = max(filter.Page, 1) firstBatchIndex = (int(filter.Page) - 1) / pagePerBatch lastBatchIndex = firstBatchIndex diff --git a/flytectl/pkg/bubbletea/bubbletea_pagination_util.go b/flytectl/pkg/bubbletea/bubbletea_pagination_util.go index dc6ddd735b..3d2a02dd09 100644 --- a/flytectl/pkg/bubbletea/bubbletea_pagination_util.go +++ b/flytectl/pkg/bubbletea/bubbletea_pagination_util.go @@ -65,23 +65,9 @@ func (p printTableProto) MarshalJSON() ([]byte, error) { return buf.Bytes(), nil } -func _max(a, b int) int { - if a > b { - return a - } - return b -} - -func _min(a, b int) int { - if a < b { - return a - } - return b -} - func getSliceBounds(m *pageModel) (start int, end int) { start = (m.paginator.Page - firstBatchIndex*pagePerBatch) * msgPerPage - end = _min(start+msgPerPage, len(*m.items)) + end = min(start+msgPerPage, len(*m.items)) return start, end } @@ -117,7 +103,7 @@ func getMessageList(batchIndex int) ([]proto.Message, error) { msg, err := callback(filters.Filters{ Limit: msgPerBatch, - Page: int32(batchIndex + 1), + Page: int32(batchIndex + 1), // #nosec G115 SortBy: filter.SortBy, Asc: filter.Asc, }) diff --git a/flytectl/pkg/docker/docker_util.go b/flytectl/pkg/docker/docker_util.go index f093e3d49a..a495fdc514 100644 --- a/flytectl/pkg/docker/docker_util.go +++ b/flytectl/pkg/docker/docker_util.go @@ -134,14 +134,14 @@ func GetSandboxPorts() (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding, e } // GetDemoPorts will return demo ports -func GetDemoPorts() (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding, error) { +func GetDemoPorts(k8sPort string) (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding, error) { return nat.ParsePortSpecs([]string{ - "0.0.0.0:6443:6443", // K3s API Port - "0.0.0.0:30080:30080", // HTTP Port - "0.0.0.0:30000:30000", // Registry Port - "0.0.0.0:30001:30001", // Postgres Port - 
"0.0.0.0:30002:30002", // Minio API Port (use HTTP port for minio console) - "0.0.0.0:30003:30003", // Buildkit Port + fmt.Sprintf("0.0.0.0:%s:6443", k8sPort), // K3s API Port + "0.0.0.0:30080:30080", // HTTP Port + "0.0.0.0:30000:30000", // Registry Port + "0.0.0.0:30001:30001", // Postgres Port + "0.0.0.0:30002:30002", // Minio API Port (use HTTP port for minio console) + "0.0.0.0:30003:30003", // Buildkit Port }) } diff --git a/flytectl/pkg/docker/docker_util_test.go b/flytectl/pkg/docker/docker_util_test.go index 8decd8824d..a03acab866 100644 --- a/flytectl/pkg/docker/docker_util_test.go +++ b/flytectl/pkg/docker/docker_util_test.go @@ -435,7 +435,7 @@ func TestGetOrCreateVolume(t *testing.T) { } func TestDemoPorts(t *testing.T) { - _, ports, _ := GetDemoPorts() + _, ports, _ := GetDemoPorts("6443") assert.Equal(t, 6, len(ports)) } diff --git a/flytectl/pkg/ext/launch_plan_fetcher.go b/flytectl/pkg/ext/launch_plan_fetcher.go index 5a8befc093..8f047f681c 100644 --- a/flytectl/pkg/ext/launch_plan_fetcher.go +++ b/flytectl/pkg/ext/launch_plan_fetcher.go @@ -19,10 +19,10 @@ func (a *AdminFetcherExtClient) FetchAllVerOfLP(ctx context.Context, lpName, pro if err != nil { return nil, err } - if len(tList.LaunchPlans) == 0 { + if len(tList.GetLaunchPlans()) == 0 { return nil, fmt.Errorf("no launchplans retrieved for %v", lpName) } - return tList.LaunchPlans, nil + return tList.GetLaunchPlans(), nil } // FetchLPLatestVersion fetches latest version for give launch plan name diff --git a/flytectl/pkg/ext/project_fetcher.go b/flytectl/pkg/ext/project_fetcher.go index a1e83fdf70..f6495b8ff2 100644 --- a/flytectl/pkg/ext/project_fetcher.go +++ b/flytectl/pkg/ext/project_fetcher.go @@ -33,13 +33,13 @@ func (a *AdminFetcherExtClient) GetProjectByID(ctx context.Context, projectID st return nil, err } - if len(response.Projects) == 0 { + if len(response.GetProjects()) == 0 { return nil, NewNotFoundError("project %s", projectID) } - if len(response.Projects) > 1 { - panic(fmt.Sprintf("unexpected number of projects in ListProjects response: %d - 0 or 1 expected", len(response.Projects))) + if len(response.GetProjects()) > 1 { + panic(fmt.Sprintf("unexpected number of projects in ListProjects response: %d - 0 or 1 expected", len(response.GetProjects()))) } - return response.Projects[0], nil + return response.GetProjects()[0], nil } diff --git a/flytectl/pkg/ext/task_fetcher.go b/flytectl/pkg/ext/task_fetcher.go index 53c0acccb0..d602ef59b3 100644 --- a/flytectl/pkg/ext/task_fetcher.go +++ b/flytectl/pkg/ext/task_fetcher.go @@ -18,10 +18,10 @@ func (a *AdminFetcherExtClient) FetchAllVerOfTask(ctx context.Context, name, pro if err != nil { return nil, err } - if len(tList.Tasks) == 0 { + if len(tList.GetTasks()) == 0 { return nil, fmt.Errorf("no tasks retrieved for %v", name) } - return tList.Tasks, nil + return tList.GetTasks(), nil } func (a *AdminFetcherExtClient) FetchTaskLatestVersion(ctx context.Context, name, project, domain string, filter filters.Filters) (*admin.Task, error) { diff --git a/flytectl/pkg/ext/workflow_fetcher.go b/flytectl/pkg/ext/workflow_fetcher.go index 69032bb998..0aacdd756a 100644 --- a/flytectl/pkg/ext/workflow_fetcher.go +++ b/flytectl/pkg/ext/workflow_fetcher.go @@ -19,10 +19,10 @@ func (a *AdminFetcherExtClient) FetchAllVerOfWorkflow(ctx context.Context, workf if err != nil { return nil, err } - if len(wList.Workflows) == 0 { + if len(wList.GetWorkflows()) == 0 { return nil, fmt.Errorf("no workflow retrieved for %v", workflowName) } - return wList.Workflows, nil + return 
wList.GetWorkflows(), nil } // FetchAllWorkflows fetches all workflows in project domain @@ -35,10 +35,10 @@ func (a *AdminFetcherExtClient) FetchAllWorkflows(ctx context.Context, project, if err != nil { return nil, err } - if len(wList.Entities) == 0 { + if len(wList.GetEntities()) == 0 { return nil, fmt.Errorf("no workflow retrieved for %v project %v domain", project, domain) } - return wList.Entities, nil + return wList.GetEntities(), nil } // FetchWorkflowLatestVersion fetches latest version for given workflow name @@ -53,7 +53,7 @@ func (a *AdminFetcherExtClient) FetchWorkflowLatestVersion(ctx context.Context, if err != nil { return nil, err } - return a.FetchWorkflowVersion(ctx, name, wVersions[0].Id.Version, project, domain) + return a.FetchWorkflowVersion(ctx, name, wVersions[0].GetId().GetVersion(), project, domain) } // FetchWorkflowVersion fetches particular version of workflow diff --git a/flytectl/pkg/filters/util.go b/flytectl/pkg/filters/util.go index a19481e32d..aed4d25f16 100644 --- a/flytectl/pkg/filters/util.go +++ b/flytectl/pkg/filters/util.go @@ -13,7 +13,7 @@ func BuildResourceListRequestWithName(c Filters, project, domain, name string) ( return nil, err } request := &admin.ResourceListRequest{ - Limit: uint32(c.Limit), + Limit: uint32(c.Limit), // #nosec G115 Token: getToken(c), Filters: fieldSelector, Id: &admin.NamedEntityIdentifier{ @@ -36,7 +36,7 @@ func BuildNamedEntityListRequest(c Filters, project, domain string, resourceType return nil, err } request := &admin.NamedEntityListRequest{ - Limit: uint32(c.Limit), + Limit: uint32(c.Limit), // #nosec G115 Token: getToken(c), Filters: fieldSelector, Project: project, @@ -55,7 +55,7 @@ func BuildProjectListRequest(c Filters) (*admin.ProjectListRequest, error) { return nil, err } request := &admin.ProjectListRequest{ - Limit: uint32(c.Limit), + Limit: uint32(c.Limit), // #nosec G115 Token: getToken(c), Filters: fieldSelector, SortBy: buildSortingRequest(c), diff --git a/flytectl/pkg/k8s/k8s_test.go b/flytectl/pkg/k8s/k8s_test.go index 84dc16923c..41ea4eaafa 100644 --- a/flytectl/pkg/k8s/k8s_test.go +++ b/flytectl/pkg/k8s/k8s_test.go @@ -45,7 +45,8 @@ users: t.Error(err) } defer os.Remove(tmpfile.Name()) - if err := ioutil.WriteFile(tmpfile.Name(), []byte(content), os.ModePerm); err != nil { + // #nosec G306 + if err := os.WriteFile(tmpfile.Name(), []byte(content), os.ModePerm); err != nil { t.Error(err) } t.Run("Create client from config", func(t *testing.T) { diff --git a/flytectl/pkg/printer/printer.go b/flytectl/pkg/printer/printer.go index df7effd8a1..bc67d75e9d 100644 --- a/flytectl/pkg/printer/printer.go +++ b/flytectl/pkg/printer/printer.go @@ -199,8 +199,8 @@ func FormatVariableDescriptions(variableMap map[string]*core.Variable) { for _, k := range keys { v := variableMap[k] // a: a isn't very helpful - if k != v.Description { - descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.Description))) + if k != v.GetDescription() { + descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.GetDescription()))) } else { descriptions = append(descriptions, getTruncatedLine(k)) } @@ -220,12 +220,12 @@ func FormatParameterDescriptions(parameterMap map[string]*core.Parameter) { var descriptions []string for _, k := range keys { v := parameterMap[k] - if v.Var == nil { + if v.GetVar() == nil { continue } // a: a isn't very helpful - if k != v.Var.Description { - descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.Var.Description))) + if k != 
v.GetVar().GetDescription() { + descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.GetVar().GetDescription()))) } else { descriptions = append(descriptions, getTruncatedLine(k)) } @@ -272,7 +272,7 @@ func (p Printer) Print(format OutputFormat, columns []Column, messages ...proto. return fmt.Errorf("at least one workflow required for visualization") } workflow := workflows[0] - graphStr, err := visualize.RenderWorkflow(workflow.Closure.CompiledWorkflow) + graphStr, err := visualize.RenderWorkflow(workflow.GetClosure().GetCompiledWorkflow()) if err != nil { return errors.Wrapf("VisualizationError", err, "failed to visualize workflow") } diff --git a/flytectl/pkg/printer/printer_test.go b/flytectl/pkg/printer/printer_test.go index afc5edb7b1..3783e5f7ca 100644 --- a/flytectl/pkg/printer/printer_test.go +++ b/flytectl/pkg/printer/printer_test.go @@ -282,7 +282,7 @@ func TestFormatVariableDescriptions(t *testing.T) { "bar": barVar, } FormatVariableDescriptions(variableMap) - assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", variableMap[DefaultFormattedDescriptionsKey].Description) + assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", variableMap[DefaultFormattedDescriptionsKey].GetDescription()) } func TestFormatParameterDescriptions(t *testing.T) { @@ -305,5 +305,5 @@ func TestFormatParameterDescriptions(t *testing.T) { "empty": emptyParam, } FormatParameterDescriptions(paramMap) - assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", paramMap[DefaultFormattedDescriptionsKey].Var.Description) + assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", paramMap[DefaultFormattedDescriptionsKey].GetVar().GetDescription()) } diff --git a/flytectl/pkg/sandbox/reload.go b/flytectl/pkg/sandbox/reload.go new file mode 100644 index 0000000000..f68b385443 --- /dev/null +++ b/flytectl/pkg/sandbox/reload.go @@ -0,0 +1,47 @@ +package sandbox + +import ( + "context" + "fmt" + + sandboxCmdConfig "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/sandbox" + "github.com/flyteorg/flyte/flytectl/pkg/docker" + "github.com/flyteorg/flyte/flytectl/pkg/k8s" + "github.com/flyteorg/flyte/flytestdlib/logger" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + flyteNs = "flyte" + labelSelector = "app.kubernetes.io/name=flyte-binary" +) + +// LegacyReloadDemoCluster will kill the flyte binary pod so the new one can pick up a new config file +func LegacyReloadDemoCluster(ctx context.Context, sandboxConfig *sandboxCmdConfig.Config) error { + k8sEndpoint := sandboxConfig.GetK8sEndpoint() + k8sClient, err := k8s.GetK8sClient(docker.Kubeconfig, k8sEndpoint) + if err != nil { + fmt.Println("Could not get K8s client") + return err + } + pi := k8sClient.CoreV1().Pods(flyteNs) + podList, err := pi.List(ctx, v1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + fmt.Println("could not list pods") + return err + } + if len(podList.Items) != 1 { + return fmt.Errorf("should only have one pod running, %d found, %v", len(podList.Items), podList.Items) + } + logger.Debugf(ctx, "Found %d pods\n", len(podList.Items)) + var grace = int64(0) + err = pi.Delete(ctx, podList.Items[0].Name, v1.DeleteOptions{ + GracePeriodSeconds: &grace, + }) + if err != nil { + fmt.Printf("Could not delete Flyte pod, old configuration may still be in effect. 
Err: %s\n", err) + return err + } + + return nil +} diff --git a/flytectl/pkg/sandbox/start.go b/flytectl/pkg/sandbox/start.go index 6681baf5e1..e638301741 100644 --- a/flytectl/pkg/sandbox/start.go +++ b/flytectl/pkg/sandbox/start.go @@ -36,7 +36,6 @@ const ( taintEffect = "NoSchedule" sandboxContextName = "flyte-sandbox" sandboxDockerContext = "default" - K8sEndpoint = "https://127.0.0.1:6443" sandboxK8sEndpoint = "https://127.0.0.1:30086" sandboxImageName = "cr.flyte.org/flyteorg/flyte-sandbox" demoImageName = "cr.flyte.org/flyteorg/flyte-sandbox-bundled" @@ -280,12 +279,13 @@ func StartCluster(ctx context.Context, args []string, sandboxConfig *sandboxCmdC return err } + k8sEndpoint := sandboxConfig.GetK8sEndpoint() if reader != nil { var k8sClient k8s.K8s err = retry.Do( func() error { // This should wait for the kubeconfig file being there. - k8sClient, err = k8s.GetK8sClient(docker.Kubeconfig, K8sEndpoint) + k8sClient, err = k8s.GetK8sClient(docker.Kubeconfig, k8sEndpoint) return err }, retry.Attempts(10), @@ -299,7 +299,7 @@ func StartCluster(ctx context.Context, args []string, sandboxConfig *sandboxCmdC err = retry.Do( func() error { // Have to get a new client every time because you run into x509 errors if not - k8sClient, err = k8s.GetK8sClient(docker.Kubeconfig, K8sEndpoint) + k8sClient, err = k8s.GetK8sClient(docker.Kubeconfig, k8sEndpoint) if err != nil { logger.Debugf(ctx, "Error getting K8s client in liveness check %s", err) return err @@ -398,7 +398,7 @@ func StartClusterForSandbox(ctx context.Context, args []string, sandboxConfig *s func StartDemoCluster(ctx context.Context, args []string, sandboxConfig *sandboxCmdConfig.Config) error { sandboxImagePrefix := "sha" - exposedPorts, portBindings, err := docker.GetDemoPorts() + exposedPorts, portBindings, err := docker.GetDemoPorts(sandboxConfig.Port) if err != nil { return err } diff --git a/flytectl/pkg/sandbox/start_test.go b/flytectl/pkg/sandbox/start_test.go index 9d24295758..84a0a4fd35 100644 --- a/flytectl/pkg/sandbox/start_test.go +++ b/flytectl/pkg/sandbox/start_test.go @@ -123,7 +123,7 @@ func TestStartFunc(t *testing.T) { config.DisableAgent = true assert.Nil(t, util.SetupFlyteDir()) assert.Nil(t, os.MkdirAll(f.FilePathJoin(f.UserHomeDir(), ".flyte", "state"), os.ModePerm)) - assert.Nil(t, ioutil.WriteFile(docker.Kubeconfig, []byte(content), os.ModePerm)) + assert.Nil(t, os.WriteFile(docker.Kubeconfig, []byte(content), os.ModePerm)) // #nosec G306 fakePod.SetName("flyte") diff --git a/flytectl/pkg/util/util.go b/flytectl/pkg/util/util.go index 18067d1702..49b1278c14 100644 --- a/flytectl/pkg/util/util.go +++ b/flytectl/pkg/util/util.go @@ -3,7 +3,6 @@ package util import ( "fmt" "io" - "io/ioutil" "net/http" "os" "path/filepath" @@ -26,7 +25,7 @@ var Ext string // WriteIntoFile will write content in a file func WriteIntoFile(data []byte, file string) error { - err := ioutil.WriteFile(file, data, os.ModePerm) + err := os.WriteFile(file, data, os.ModePerm) // #nosec G306 if err != nil { return err } @@ -38,6 +37,7 @@ func CreatePathAndFile(pathToConfig string) error { if err != nil { return err } + // #nosec G306 if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil { return err } @@ -45,6 +45,7 @@ func CreatePathAndFile(pathToConfig string) error { // Created a empty file with right permission if _, err := os.Stat(p); err != nil { if os.IsNotExist(err) { + // #nosec G306 if err := os.WriteFile(p, []byte(""), os.ModePerm); err != nil { return err } @@ -62,6 +63,7 @@ func SetupFlyteDir() error { // Created a 
empty file with right permission if _, err := os.Stat(docker.Kubeconfig); err != nil { if os.IsNotExist(err) { + // #nosec G306 if err := os.WriteFile(docker.Kubeconfig, []byte(""), os.ModePerm); err != nil { return err } diff --git a/flytectl/pkg/visualize/graphviz.go b/flytectl/pkg/visualize/graphviz.go index 745c7ad248..be4f275fc5 100644 --- a/flytectl/pkg/visualize/graphviz.go +++ b/flytectl/pkg/visualize/graphviz.go @@ -56,11 +56,11 @@ func operandToString(op *core.Operand) string { } func comparisonToString(expr *core.ComparisonExpression) string { - return fmt.Sprintf("%s %s %s", operandToString(expr.LeftValue), expr.Operator.String(), operandToString(expr.RightValue)) + return fmt.Sprintf("%s %s %s", operandToString(expr.GetLeftValue()), expr.GetOperator().String(), operandToString(expr.GetRightValue())) } func conjunctionToString(expr *core.ConjunctionExpression) string { - return fmt.Sprintf("(%s) %s (%s)", booleanExprToString(expr.LeftExpression), expr.Operator.String(), booleanExprToString(expr.RightExpression)) + return fmt.Sprintf("(%s) %s (%s)", booleanExprToString(expr.GetLeftExpression()), expr.GetOperator().String(), booleanExprToString(expr.GetRightExpression())) } func booleanExprToString(expr *core.BooleanExpression) string { @@ -86,9 +86,9 @@ func constructEndNode(parentGraph string, n string, graph Graphvizer) (*graphviz func constructTaskNode(parentGraph string, name string, graph Graphvizer, n *core.Node, t *core.CompiledTask) (*graphviz.Node, error) { attrs := map[string]string{ShapeType: BoxShape} - if n.Metadata != nil && n.Metadata.Name != "" { - v := strings.LastIndexAny(n.Metadata.Name, ".") - attrs[LabelAttr] = fmt.Sprintf("\"%s [%s]\"", n.Metadata.Name[v+1:], t.Template.Type) + if n.GetMetadata() != nil && n.GetMetadata().GetName() != "" { + v := strings.LastIndexAny(n.GetMetadata().GetName(), ".") + attrs[LabelAttr] = fmt.Sprintf("\"%s [%s]\"", n.GetMetadata().GetName()[v+1:], t.GetTemplate().GetType()) } tName := strings.ReplaceAll(name, "-", "_") err := graph.AddNode(parentGraph, tName, attrs) @@ -104,8 +104,8 @@ func constructErrorNode(parentGraph string, name string, graph Graphvizer, m str func constructBranchConditionNode(parentGraph string, name string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) { attrs := map[string]string{ShapeType: DiamondShape} - if n.Metadata != nil && n.Metadata.Name != "" { - attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", n.Metadata.Name) + if n.GetMetadata() != nil && n.GetMetadata().GetName() != "" { + attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", n.GetMetadata().GetName()) } cName := strings.ReplaceAll(name, "-", "_") err := graph.AddNode(parentGraph, cName, attrs) @@ -151,27 +151,27 @@ func (gb *graphBuilder) addBranchSubNodeEdge(graph Graphvizer, parentNode, n *gr } func (gb *graphBuilder) constructBranchNode(parentGraph string, prefix string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) { - parentBranchNode, err := constructBranchConditionNode(parentGraph, getName(prefix, n.Id), graph, n) + parentBranchNode, err := constructBranchConditionNode(parentGraph, getName(prefix, n.GetId()), graph, n) if err != nil { return nil, err } - gb.graphNodes[n.Id] = parentBranchNode + gb.graphNodes[n.GetId()] = parentBranchNode if n.GetBranchNode().GetIfElse() == nil { return parentBranchNode, nil } - subNode, err := gb.constructNode(parentGraph, prefix, graph, n.GetBranchNode().GetIfElse().Case.ThenNode) + subNode, err := gb.constructNode(parentGraph, prefix, graph, 
n.GetBranchNode().GetIfElse().GetCase().GetThenNode()) if err != nil { return nil, err } - if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(n.GetBranchNode().GetIfElse().Case.Condition)); err != nil { + if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(n.GetBranchNode().GetIfElse().GetCase().GetCondition())); err != nil { return nil, err } if n.GetBranchNode().GetIfElse().GetError() != nil { name := fmt.Sprintf("%s-error", parentBranchNode.Name) - subNode, err := constructErrorNode(prefix, name, graph, n.GetBranchNode().GetIfElse().GetError().Message) + subNode, err := constructErrorNode(prefix, name, graph, n.GetBranchNode().GetIfElse().GetError().GetMessage()) if err != nil { return nil, err } @@ -191,11 +191,11 @@ func (gb *graphBuilder) constructBranchNode(parentGraph string, prefix string, g if n.GetBranchNode().GetIfElse().GetOther() != nil { for _, c := range n.GetBranchNode().GetIfElse().GetOther() { - subNode, err := gb.constructNode(parentGraph, prefix, graph, c.ThenNode) + subNode, err := gb.constructNode(parentGraph, prefix, graph, c.GetThenNode()) if err != nil { return nil, err } - if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(c.Condition)); err != nil { + if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(c.GetCondition())); err != nil { return nil, err } } @@ -204,18 +204,18 @@ func (gb *graphBuilder) constructBranchNode(parentGraph string, prefix string, g } func (gb *graphBuilder) constructNode(parentGraphName string, prefix string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) { - name := getName(prefix, n.Id) + name := getName(prefix, n.GetId()) var err error var gn *graphviz.Node - if n.Id == StartNode { + if n.GetId() == StartNode { gn, err = constructStartNode(parentGraphName, strings.ReplaceAll(name, "-", "_"), graph) gb.nodeClusters[name] = parentGraphName - } else if n.Id == EndNode { + } else if n.GetId() == EndNode { gn, err = constructEndNode(parentGraphName, strings.ReplaceAll(name, "-", "_"), graph) gb.nodeClusters[name] = parentGraphName } else { - switch n.Target.(type) { + switch n.GetTarget().(type) { case *core.Node_TaskNode: tID := n.GetTaskNode().GetReferenceId().String() t, ok := gb.tasks[tID] @@ -228,7 +228,7 @@ func (gb *graphBuilder) constructNode(parentGraphName string, prefix string, gra } gb.nodeClusters[name] = parentGraphName case *core.Node_BranchNode: - sanitizedName := strings.ReplaceAll(n.Metadata.Name, "-", "_") + sanitizedName := strings.ReplaceAll(n.GetMetadata().GetName(), "-", "_") branchSubGraphName := SubgraphPrefix + sanitizedName err := graph.AddSubGraph(parentGraphName, branchSubGraphName, map[string]string{LabelAttr: sanitizedName}) if err != nil { @@ -269,7 +269,7 @@ func (gb *graphBuilder) constructNode(parentGraphName string, prefix string, gra if err != nil { return nil, err } - gb.graphNodes[n.Id] = gn + gb.graphNodes[n.GetId()] = gn return gn, nil } @@ -298,27 +298,27 @@ func (gb *graphBuilder) addEdge(fromNodeName, toNodeName string, graph Graphvize } func (gb *graphBuilder) constructGraph(parentGraphName string, prefix string, graph Graphvizer, w *core.CompiledWorkflow) error { - if w == nil || w.Template == nil { + if w == nil || w.GetTemplate() == nil { return nil } - for _, n := range w.Template.Nodes { + for _, n := range w.GetTemplate().GetNodes() { if _, err := gb.constructNode(parentGraphName, prefix, graph, n); err != nil { return err } } for name := 
range gb.graphNodes { - upstreamNodes := w.Connections.Upstream[name] - downstreamNodes := w.Connections.Downstream[name] + upstreamNodes := w.GetConnections().GetUpstream()[name] + downstreamNodes := w.GetConnections().GetDownstream()[name] if downstreamNodes != nil { - for _, n := range downstreamNodes.Ids { + for _, n := range downstreamNodes.GetIds() { if err := gb.addEdge(name, n, graph); err != nil { return err } } } if upstreamNodes != nil { - for _, n := range upstreamNodes.Ids { + for _, n := range upstreamNodes.GetIds() { if err := gb.addEdge(n, name, graph); err != nil { return err } @@ -334,23 +334,23 @@ func (gb *graphBuilder) CompiledWorkflowClosureToGraph(w *core.CompiledWorkflowC _ = dotGraph.SetStrict(true) tLookup := make(map[string]*core.CompiledTask) - for _, t := range w.Tasks { - if t.Template == nil || t.Template.Id == nil { + for _, t := range w.GetTasks() { + if t.GetTemplate() == nil || t.GetTemplate().GetId() == nil { return FlyteGraph{}, fmt.Errorf("no template found in the workflow task %v", t) } - tLookup[t.Template.Id.String()] = t + tLookup[t.GetTemplate().GetId().String()] = t } gb.tasks = tLookup wLookup := make(map[string]*core.CompiledWorkflow) - for _, swf := range w.SubWorkflows { - if swf.Template == nil || swf.Template.Id == nil { + for _, swf := range w.GetSubWorkflows() { + if swf.GetTemplate() == nil || swf.GetTemplate().GetId() == nil { return FlyteGraph{}, fmt.Errorf("no template found in the sub workflow %v", swf) } - wLookup[swf.Template.Id.String()] = swf + wLookup[swf.GetTemplate().GetId().String()] = swf } gb.subWf = wLookup - return dotGraph, gb.constructGraph("", "", dotGraph, w.Primary) + return dotGraph, gb.constructGraph("", "", dotGraph, w.GetPrimary()) } func newGraphBuilder() *graphBuilder { diff --git a/flyteidl/.golangci.yml b/flyteidl/.golangci.yml index 7714cbe5a3..8eda34cffe 100644 --- a/flyteidl/.golangci.yml +++ b/flyteidl/.golangci.yml @@ -1,31 +1,22 @@ -# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. -# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: -# -# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst - run: skip-dirs: - - pkg/client + - pkg/client - gen - linters: disable-all: true enable: - - deadcode - errcheck - - gas + - gosec - goconst - goimports - - golint - gosimple - govet - ineffassign - misspell - nakedret - staticcheck - - structcheck - typecheck - unconvert - unparam - unused - - varcheck + - protogetter diff --git a/flyteidl/clients/go/admin/auth_interceptor.go b/flyteidl/clients/go/admin/auth_interceptor.go index 5d3d9fd92f..802db2cb0e 100644 --- a/flyteidl/clients/go/admin/auth_interceptor.go +++ b/flyteidl/clients/go/admin/auth_interceptor.go @@ -13,6 +13,7 @@ import ( "google.golang.org/grpc/status" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/cache" + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/utils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/logger" ) @@ -23,7 +24,6 @@ const ProxyAuthorizationHeader = "proxy-authorization" // Once established, it'll invoke PerRPCCredentialsFuture.Store() on perRPCCredentials to populate it with the appropriate values. func MaterializeCredentials(tokenSource oauth2.TokenSource, cfg *Config, authorizationMetadataKey string, perRPCCredentials *PerRPCCredentialsFuture) error { - _, err := tokenSource.Token() if err != nil { return fmt.Errorf("failed to issue token. 
Error: %w", err) @@ -35,6 +35,19 @@ func MaterializeCredentials(tokenSource oauth2.TokenSource, cfg *Config, authori return nil } +// MaterializeInMemoryCredentials initializes the perRPCCredentials with the token source containing in memory cached token. +// This path doesn't perform the token refresh and only build the cred source with cached token. +func MaterializeInMemoryCredentials(ctx context.Context, cfg *Config, tokenCache cache.TokenCache, + perRPCCredentials *PerRPCCredentialsFuture, authorizationMetadataKey string) error { + tokenSource, err := NewInMemoryTokenSourceProvider(tokenCache).GetTokenSource(ctx) + if err != nil { + return fmt.Errorf("failed to get token source. Error: %w", err) + } + wrappedTokenSource := NewCustomHeaderTokenSource(tokenSource, cfg.UseInsecureConnection, authorizationMetadataKey) + perRPCCredentials.Store(wrappedTokenSource) + return nil +} + func GetProxyTokenSource(ctx context.Context, cfg *Config) (oauth2.TokenSource, error) { tokenSourceProvider, err := NewExternalTokenSourceProvider(cfg.ProxyCommand) if err != nil { @@ -93,7 +106,7 @@ func setHTTPClientContext(ctx context.Context, cfg *Config, proxyCredentialsFutu transport.Proxy = http.ProxyURL(&cfg.HTTPProxyURL.URL) } - if cfg.ProxyCommand != nil && len(cfg.ProxyCommand) > 0 { + if len(cfg.ProxyCommand) > 0 { httpClient.Transport = &proxyAuthTransport{ transport: transport, proxyCredentialsFuture: proxyCredentialsFuture, @@ -130,7 +143,7 @@ func (o *OauthMetadataProvider) getTokenSourceAndMetadata(cfg *Config, tokenCach if err != nil { return fmt.Errorf("failed to fetch client metadata. Error: %v", err) } - authorizationMetadataKey = clientMetadata.AuthorizationMetadataKey + authorizationMetadataKey = clientMetadata.GetAuthorizationMetadataKey() } tokenSource, err := tokenSourceProvider.GetTokenSource(ctx) @@ -152,6 +165,7 @@ func (o *OauthMetadataProvider) GetOauthMetadata(cfg *Config, tokenCache cache.T if err != nil { logger.Errorf(context.Background(), "Failed to load token related config. Error: %v", err) } + logger.Debugf(context.Background(), "Successfully loaded token related metadata") }) if err != nil { return err @@ -176,22 +190,21 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut } return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - ctx = setHTTPClientContext(ctx, cfg, proxyCredentialsFuture) - // If there is already a token in the cache (e.g. key-ring), we should use it immediately... t, _ := tokenCache.GetToken() if t != nil { + err := oauthMetadataProvider.GetOauthMetadata(cfg, tokenCache, proxyCredentialsFuture) if err != nil { return err } authorizationMetadataKey := oauthMetadataProvider.authorizationMetadataKey - tokenSource := oauthMetadataProvider.tokenSource - - err = MaterializeCredentials(tokenSource, cfg, authorizationMetadataKey, credentialsFuture) - if err != nil { - return fmt.Errorf("failed to materialize credentials. Error: %v", err) + if isValid := utils.Valid(t); isValid { + err := MaterializeInMemoryCredentials(ctx, cfg, tokenCache, credentialsFuture, authorizationMetadataKey) + if err != nil { + return fmt.Errorf("failed to materialize credentials. 
Error: %v", err) + } } } @@ -208,13 +221,11 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut } authorizationMetadataKey := oauthMetadataProvider.authorizationMetadataKey tokenSource := oauthMetadataProvider.tokenSource - err = func() error { if !tokenCache.TryLock() { tokenCache.CondWait() return nil } - defer tokenCache.Unlock() _, err := tokenCache.PurgeIfEquals(t) if err != nil && !errors.Is(err, cache.ErrNotFound) { @@ -227,7 +238,7 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut if newErr != nil { errString := fmt.Sprintf("authentication error! Original Error: %v, Auth Error: %v", err, newErr) logger.Errorf(ctx, errString) - return fmt.Errorf(errString) + return fmt.Errorf(errString) //nolint } tokenCache.CondBroadcast() @@ -237,6 +248,7 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut if err != nil { return err } + return invoker(ctx, method, req, reply, cc, opts...) } } @@ -257,6 +269,7 @@ func NewProxyAuthInterceptor(cfg *Config, proxyCredentialsFuture *PerRPCCredenti } return invoker(ctx, method, req, reply, cc, opts...) } + return err } } diff --git a/flyteidl/clients/go/admin/auth_interceptor_test.go b/flyteidl/clients/go/admin/auth_interceptor_test.go index b03171c825..0dee7428bc 100644 --- a/flyteidl/clients/go/admin/auth_interceptor_test.go +++ b/flyteidl/clients/go/admin/auth_interceptor_test.go @@ -2,17 +2,16 @@ package admin import ( "context" - "encoding/json" "errors" "fmt" "io" "net" "net/http" "net/url" - "os" "strings" "sync" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -24,7 +23,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/clients/go/admin/cache/mocks" adminMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" - + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/utils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/config" "github.com/flyteorg/flyte/flytestdlib/logger" @@ -137,10 +136,7 @@ func newAuthMetadataServer(t testing.TB, grpcPort int, httpPort int, impl servic } func Test_newAuthInterceptor(t *testing.T) { - plan, _ := os.ReadFile("tokenorchestrator/testdata/token.json") - var tokenData oauth2.Token - err := json.Unmarshal(plan, &tokenData) - assert.NoError(t, err) + tokenData := utils.GenTokenWithCustomExpiry(t, time.Now().Add(20*time.Minute)) t.Run("Other Error", func(t *testing.T) { ctx := context.Background() httpPort := rand.IntnRange(10000, 60000) @@ -164,7 +160,8 @@ func Test_newAuthInterceptor(t *testing.T) { f := NewPerRPCCredentialsFuture() p := NewPerRPCCredentialsFuture() mockTokenCache := &mocks.TokenCache{} - mockTokenCache.OnGetTokenMatch().Return(&tokenData, nil) + + mockTokenCache.OnGetTokenMatch().Return(tokenData, nil) mockTokenCache.OnSaveTokenMatch(mock.Anything).Return(nil) interceptor := NewAuthInterceptor(&Config{ Endpoint: config.URL{URL: *u}, diff --git a/flyteidl/clients/go/admin/client.go b/flyteidl/clients/go/admin/client.go index 9f14d49dee..6f6b5d46fb 100644 --- a/flyteidl/clients/go/admin/client.go +++ b/flyteidl/clients/go/admin/client.go @@ -73,7 +73,7 @@ func GetAdditionalAdminClientConfigOptions(cfg *Config) []grpc.DialOption { opts = append(opts, grpc.WithBackoffConfig(backoffConfig)) timeoutDialOption := grpcRetry.WithPerRetryTimeout(cfg.PerRetryTimeout.Duration) - maxRetriesOption := grpcRetry.WithMax(uint(cfg.MaxRetries)) + maxRetriesOption := grpcRetry.WithMax(uint(cfg.MaxRetries)) // 
#nosec G115 retryInterceptor := grpcRetry.UnaryClientInterceptor(timeoutDialOption, maxRetriesOption) // We only make unary calls in this client, no streaming calls. We can add a streaming interceptor if admin @@ -101,7 +101,7 @@ func getAuthenticationDialOption(ctx context.Context, cfg *Config, tokenSourcePr if err != nil { return nil, fmt.Errorf("failed to fetch client metadata. Error: %v", err) } - authorizationMetadataKey = clientMetadata.AuthorizationMetadataKey + authorizationMetadataKey = clientMetadata.GetAuthorizationMetadataKey() } tokenSource, err := tokenSourceProvider.GetTokenSource(ctx) @@ -157,7 +157,7 @@ func NewAdminConnection(ctx context.Context, cfg *Config, proxyCredentialsFuture opts = append(opts, GetAdditionalAdminClientConfigOptions(cfg)...) - if cfg.ProxyCommand != nil && len(cfg.ProxyCommand) > 0 { + if len(cfg.ProxyCommand) > 0 { opts = append(opts, grpc.WithChainUnaryInterceptor(NewProxyAuthInterceptor(cfg, proxyCredentialsFuture))) opts = append(opts, grpc.WithPerRPCCredentials(proxyCredentialsFuture)) } diff --git a/flyteidl/clients/go/admin/client_test.go b/flyteidl/clients/go/admin/client_test.go index 042a826692..e61f066c26 100644 --- a/flyteidl/clients/go/admin/client_test.go +++ b/flyteidl/clients/go/admin/client_test.go @@ -2,13 +2,10 @@ package admin import ( "context" - "encoding/json" "errors" "fmt" - "io/ioutil" "net/http" "net/url" - "os" "testing" "time" @@ -24,6 +21,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/clients/go/admin/oauth" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/pkce" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/tokenorchestrator" + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/utils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/config" "github.com/flyteorg/flyte/flytestdlib/logger" @@ -231,15 +229,11 @@ func TestGetAuthenticationDialOptionPkce(t *testing.T) { RedirectUri: "http://localhost:54545/callback", } http.DefaultServeMux = http.NewServeMux() - plan, _ := os.ReadFile("tokenorchestrator/testdata/token.json") - var tokenData oauth2.Token - err := json.Unmarshal(plan, &tokenData) - assert.NoError(t, err) - tokenData.Expiry = time.Now().Add(time.Minute) + tokenData := utils.GenTokenWithCustomExpiry(t, time.Now().Add(time.Minute)) t.Run("cache hit", func(t *testing.T) { mockTokenCache := new(cachemocks.TokenCache) mockAuthClient := new(mocks.AuthMetadataServiceClient) - mockTokenCache.OnGetTokenMatch().Return(&tokenData, nil) + mockTokenCache.OnGetTokenMatch().Return(tokenData, nil) mockTokenCache.OnSaveTokenMatch(mock.Anything).Return(nil) mockAuthClient.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(metadata, nil) mockAuthClient.OnGetPublicClientConfigMatch(mock.Anything, mock.Anything).Return(clientMetatadata, nil) @@ -249,11 +243,11 @@ func TestGetAuthenticationDialOptionPkce(t *testing.T) { assert.NotNil(t, dialOption) assert.Nil(t, err) }) - tokenData.Expiry = time.Now().Add(-time.Minute) t.Run("cache miss auth failure", func(t *testing.T) { + tokenData = utils.GenTokenWithCustomExpiry(t, time.Now().Add(-time.Minute)) mockTokenCache := new(cachemocks.TokenCache) mockAuthClient := new(mocks.AuthMetadataServiceClient) - mockTokenCache.OnGetTokenMatch().Return(&tokenData, nil) + mockTokenCache.OnGetTokenMatch().Return(tokenData, nil) mockTokenCache.OnSaveTokenMatch(mock.Anything).Return(nil) mockTokenCache.On("Lock").Return() mockTokenCache.On("Unlock").Return() @@ -284,14 +278,11 @@ func Test_getPkceAuthTokenSource(t 
*testing.T) { mockAuthClient.OnGetPublicClientConfigMatch(mock.Anything, mock.Anything).Return(clientMetatadata, nil) t.Run("cached token expired", func(t *testing.T) { - plan, _ := ioutil.ReadFile("tokenorchestrator/testdata/token.json") - var tokenData oauth2.Token - err := json.Unmarshal(plan, &tokenData) - assert.NoError(t, err) + tokenData := utils.GenTokenWithCustomExpiry(t, time.Now().Add(-time.Minute)) // populate the cache tokenCache := cache.NewTokenCacheInMemoryProvider() - assert.NoError(t, tokenCache.SaveToken(&tokenData)) + assert.NoError(t, tokenCache.SaveToken(tokenData)) baseOrchestrator := tokenorchestrator.BaseTokenOrchestrator{ ClientConfig: &oauth.Config{ diff --git a/flyteidl/clients/go/admin/oauth/config.go b/flyteidl/clients/go/admin/oauth/config.go index f0a8b9afa4..94055f678a 100644 --- a/flyteidl/clients/go/admin/oauth/config.go +++ b/flyteidl/clients/go/admin/oauth/config.go @@ -30,16 +30,16 @@ func BuildConfigFromMetadataService(ctx context.Context, authMetadataClient serv clientConf = &Config{ Config: &oauth2.Config{ - ClientID: clientResp.ClientId, - RedirectURL: clientResp.RedirectUri, - Scopes: clientResp.Scopes, + ClientID: clientResp.GetClientId(), + RedirectURL: clientResp.GetRedirectUri(), + Scopes: clientResp.GetScopes(), Endpoint: oauth2.Endpoint{ - TokenURL: oauthMetaResp.TokenEndpoint, - AuthURL: oauthMetaResp.AuthorizationEndpoint, + TokenURL: oauthMetaResp.GetTokenEndpoint(), + AuthURL: oauthMetaResp.GetAuthorizationEndpoint(), }, }, - DeviceEndpoint: oauthMetaResp.DeviceAuthorizationEndpoint, - Audience: clientResp.Audience, + DeviceEndpoint: oauthMetaResp.GetDeviceAuthorizationEndpoint(), + Audience: clientResp.GetAudience(), } return clientConf, nil diff --git a/flyteidl/clients/go/admin/token_source_provider.go b/flyteidl/clients/go/admin/token_source_provider.go index 4ecfa59215..b0ab0ce3e1 100644 --- a/flyteidl/clients/go/admin/token_source_provider.go +++ b/flyteidl/clients/go/admin/token_source_provider.go @@ -20,6 +20,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/clients/go/admin/externalprocess" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/pkce" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/tokenorchestrator" + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/utils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/logger" ) @@ -53,7 +54,7 @@ func NewTokenSourceProvider(ctx context.Context, cfg *Config, tokenCache cache.T return nil, fmt.Errorf("failed to fetch auth metadata. 
Error: %v", err) } - tokenURL = metadata.TokenEndpoint + tokenURL = metadata.GetTokenEndpoint() } scopes := cfg.Scopes @@ -66,11 +67,11 @@ func NewTokenSourceProvider(ctx context.Context, cfg *Config, tokenCache cache.T } // Update scopes from publicClientConfig if len(scopes) == 0 { - scopes = publicClientConfig.Scopes + scopes = publicClientConfig.GetScopes() } // Update audience from publicClientConfig if cfg.UseAudienceFromAdmin { - audienceValue = publicClientConfig.Audience + audienceValue = publicClientConfig.GetAudience() } } @@ -229,8 +230,14 @@ func (s *customTokenSource) Token() (*oauth2.Token, error) { s.mu.Lock() defer s.mu.Unlock() - if token, err := s.tokenCache.GetToken(); err == nil && token.Valid() { - return token, nil + token, err := s.tokenCache.GetToken() + if err != nil { + logger.Warnf(s.ctx, "failed to get token from cache: %v", err) + } else { + if isValid := utils.Valid(token); isValid { + logger.Infof(context.Background(), "retrieved token from cache with expiry %v", token.Expiry) + return token, nil + } } totalAttempts := s.cfg.MaxRetries + 1 // Add one for initial request attempt @@ -238,19 +245,21 @@ func (s *customTokenSource) Token() (*oauth2.Token, error) { Duration: s.cfg.PerRetryTimeout.Duration, Steps: totalAttempts, } - var token *oauth2.Token - err := retry.OnError(backoff, func(err error) bool { + + err = retry.OnError(backoff, func(err error) bool { return err != nil }, func() (err error) { token, err = s.new.Token() if err != nil { - logger.Infof(s.ctx, "failed to get token: %w", err) - return fmt.Errorf("failed to get token: %w", err) + logger.Infof(s.ctx, "failed to get new token: %w", err) + return fmt.Errorf("failed to get new token: %w", err) } + logger.Infof(context.Background(), "Fetched new token with expiry %v", token.Expiry) return nil }) if err != nil { - return nil, err + logger.Warnf(s.ctx, "failed to get new token: %v", err) + return nil, fmt.Errorf("failed to get new token: %w", err) } logger.Infof(s.ctx, "retrieved token with expiry %v", token.Expiry) @@ -262,6 +271,29 @@ func (s *customTokenSource) Token() (*oauth2.Token, error) { return token, nil } +type InMemoryTokenSourceProvider struct { + tokenCache cache.TokenCache +} + +func NewInMemoryTokenSourceProvider(tokenCache cache.TokenCache) TokenSourceProvider { + return InMemoryTokenSourceProvider{tokenCache: tokenCache} +} + +func (i InMemoryTokenSourceProvider) GetTokenSource(ctx context.Context) (oauth2.TokenSource, error) { + return GetInMemoryAuthTokenSource(ctx, i.tokenCache) +} + +// GetInMemoryAuthTokenSource Returns the token source with cached token +func GetInMemoryAuthTokenSource(ctx context.Context, tokenCache cache.TokenCache) (oauth2.TokenSource, error) { + authToken, err := tokenCache.GetToken() + if err != nil { + return nil, err + } + return &pkce.SimpleTokenSource{ + CachedToken: authToken, + }, nil +} + type DeviceFlowTokenSourceProvider struct { tokenOrchestrator deviceflow.TokenOrchestrator } diff --git a/flyteidl/clients/go/admin/token_source_provider_test.go b/flyteidl/clients/go/admin/token_source_provider_test.go index 43d0fdd928..941b697e75 100644 --- a/flyteidl/clients/go/admin/token_source_provider_test.go +++ b/flyteidl/clients/go/admin/token_source_provider_test.go @@ -13,6 +13,7 @@ import ( tokenCacheMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/cache/mocks" adminMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/utils" 
"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" ) @@ -88,9 +89,9 @@ func TestCustomTokenSource_Token(t *testing.T) { minuteAgo := time.Now().Add(-time.Minute) hourAhead := time.Now().Add(time.Hour) twoHourAhead := time.Now().Add(2 * time.Hour) - invalidToken := oauth2.Token{AccessToken: "foo", Expiry: minuteAgo} - validToken := oauth2.Token{AccessToken: "foo", Expiry: hourAhead} - newToken := oauth2.Token{AccessToken: "foo", Expiry: twoHourAhead} + invalidToken := utils.GenTokenWithCustomExpiry(t, minuteAgo) + validToken := utils.GenTokenWithCustomExpiry(t, hourAhead) + newToken := utils.GenTokenWithCustomExpiry(t, twoHourAhead) tests := []struct { name string @@ -101,24 +102,24 @@ func TestCustomTokenSource_Token(t *testing.T) { { name: "no cached token", token: nil, - newToken: &newToken, - expectedToken: &newToken, + newToken: newToken, + expectedToken: newToken, }, { name: "cached token valid", - token: &validToken, + token: validToken, newToken: nil, - expectedToken: &validToken, + expectedToken: validToken, }, { name: "cached token expired", - token: &invalidToken, - newToken: &newToken, - expectedToken: &newToken, + token: invalidToken, + newToken: newToken, + expectedToken: newToken, }, { name: "failed new token", - token: &invalidToken, + token: invalidToken, newToken: nil, expectedToken: nil, }, @@ -138,7 +139,7 @@ func TestCustomTokenSource_Token(t *testing.T) { assert.True(t, ok) mockSource := &adminMocks.TokenSource{} - if test.token != &validToken { + if test.token != validToken { if test.newToken != nil { mockSource.OnToken().Return(test.newToken, nil) } else { diff --git a/flyteidl/clients/go/admin/tokenorchestrator/base_token_orchestrator.go b/flyteidl/clients/go/admin/tokenorchestrator/base_token_orchestrator.go index 4fd3fa476c..441127ce07 100644 --- a/flyteidl/clients/go/admin/tokenorchestrator/base_token_orchestrator.go +++ b/flyteidl/clients/go/admin/tokenorchestrator/base_token_orchestrator.go @@ -8,6 +8,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/clients/go/admin/cache" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/oauth" + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/utils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/config" "github.com/flyteorg/flyte/flytestdlib/logger" @@ -52,7 +53,8 @@ func (t BaseTokenOrchestrator) FetchTokenFromCacheOrRefreshIt(ctx context.Contex return nil, err } - if token.Valid() { + if isValid := utils.Valid(token); isValid { + logger.Infof(context.Background(), "retrieved token from cache with expiry %v", token.Expiry) return token, nil } diff --git a/flyteidl/clients/go/admin/tokenorchestrator/base_token_orchestrator_test.go b/flyteidl/clients/go/admin/tokenorchestrator/base_token_orchestrator_test.go index 0a1a9f4985..d7e5ca07b2 100644 --- a/flyteidl/clients/go/admin/tokenorchestrator/base_token_orchestrator_test.go +++ b/flyteidl/clients/go/admin/tokenorchestrator/base_token_orchestrator_test.go @@ -2,8 +2,6 @@ package tokenorchestrator import ( "context" - "encoding/json" - "os" "testing" "time" @@ -15,6 +13,7 @@ import ( cacheMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/cache/mocks" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/oauth" + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/utils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/config" ) @@ -32,12 +31,9 @@ func TestRefreshTheToken(t 
*testing.T) { TokenCache: tokenCacheProvider, } - plan, _ := os.ReadFile("testdata/token.json") - var tokenData oauth2.Token - err := json.Unmarshal(plan, &tokenData) - assert.Nil(t, err) t.Run("bad url in Config", func(t *testing.T) { - refreshedToken, err := orchestrator.RefreshToken(ctx, &tokenData) + tokenData := utils.GenTokenWithCustomExpiry(t, time.Now().Add(-20*time.Minute)) + refreshedToken, err := orchestrator.RefreshToken(ctx, tokenData) assert.Nil(t, refreshedToken) assert.NotNil(t, err) }) @@ -72,12 +68,8 @@ func TestFetchFromCache(t *testing.T) { tokenCacheProvider := cache.NewTokenCacheInMemoryProvider() orchestrator, err := NewBaseTokenOrchestrator(ctx, tokenCacheProvider, mockAuthClient) assert.NoError(t, err) - fileData, _ := os.ReadFile("testdata/token.json") - var tokenData oauth2.Token - err = json.Unmarshal(fileData, &tokenData) - assert.Nil(t, err) - tokenData.Expiry = time.Now().Add(20 * time.Minute) - err = tokenCacheProvider.SaveToken(&tokenData) + tokenData := utils.GenTokenWithCustomExpiry(t, time.Now().Add(20*time.Minute)) + err = tokenCacheProvider.SaveToken(tokenData) assert.Nil(t, err) cachedToken, err := orchestrator.FetchTokenFromCacheOrRefreshIt(ctx, config.Duration{Duration: 5 * time.Minute}) assert.Nil(t, err) @@ -89,12 +81,8 @@ func TestFetchFromCache(t *testing.T) { tokenCacheProvider := cache.NewTokenCacheInMemoryProvider() orchestrator, err := NewBaseTokenOrchestrator(ctx, tokenCacheProvider, mockAuthClient) assert.NoError(t, err) - fileData, _ := os.ReadFile("testdata/token.json") - var tokenData oauth2.Token - err = json.Unmarshal(fileData, &tokenData) - assert.Nil(t, err) - tokenData.Expiry = time.Now().Add(-20 * time.Minute) - err = tokenCacheProvider.SaveToken(&tokenData) + tokenData := utils.GenTokenWithCustomExpiry(t, time.Now().Add(-20*time.Minute)) + err = tokenCacheProvider.SaveToken(tokenData) assert.Nil(t, err) _, err = orchestrator.FetchTokenFromCacheOrRefreshIt(ctx, config.Duration{Duration: 5 * time.Minute}) assert.NotNil(t, err) @@ -104,12 +92,8 @@ func TestFetchFromCache(t *testing.T) { mockTokenCacheProvider := new(cacheMocks.TokenCache) orchestrator, err := NewBaseTokenOrchestrator(ctx, mockTokenCacheProvider, mockAuthClient) assert.NoError(t, err) - fileData, _ := os.ReadFile("testdata/token.json") - var tokenData oauth2.Token - err = json.Unmarshal(fileData, &tokenData) - assert.Nil(t, err) - tokenData.Expiry = time.Now().Add(20 * time.Minute) - mockTokenCacheProvider.OnGetTokenMatch(mock.Anything).Return(&tokenData, nil) + tokenData := utils.GenTokenWithCustomExpiry(t, time.Now().Add(20*time.Minute)) + mockTokenCacheProvider.OnGetTokenMatch(mock.Anything).Return(tokenData, nil) mockTokenCacheProvider.OnSaveTokenMatch(mock.Anything).Return(nil) assert.Nil(t, err) refreshedToken, err := orchestrator.FetchTokenFromCacheOrRefreshIt(ctx, config.Duration{Duration: 5 * time.Minute}) @@ -122,12 +106,8 @@ func TestFetchFromCache(t *testing.T) { mockTokenCacheProvider := new(cacheMocks.TokenCache) orchestrator, err := NewBaseTokenOrchestrator(ctx, mockTokenCacheProvider, mockAuthClient) assert.NoError(t, err) - fileData, _ := os.ReadFile("testdata/token.json") - var tokenData oauth2.Token - err = json.Unmarshal(fileData, &tokenData) - assert.Nil(t, err) - tokenData.Expiry = time.Now().Add(20 * time.Minute) - mockTokenCacheProvider.OnGetTokenMatch(mock.Anything).Return(&tokenData, nil) + tokenData := utils.GenTokenWithCustomExpiry(t, time.Now().Add(20*time.Minute)) + mockTokenCacheProvider.OnGetTokenMatch(mock.Anything).Return(tokenData, 
nil) assert.Nil(t, err) refreshedToken, err := orchestrator.FetchTokenFromCacheOrRefreshIt(ctx, config.Duration{Duration: 5 * time.Minute}) assert.Nil(t, err) diff --git a/flyteidl/clients/go/admin/tokenorchestrator/testdata/token.json b/flyteidl/clients/go/admin/tokenorchestrator/testdata/token.json deleted file mode 100644 index 721cecc5f6..0000000000 --- a/flyteidl/clients/go/admin/tokenorchestrator/testdata/token.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "access_token":"eyJhbGciOiJSUzI1NiIsImtleV9pZCI6IjlLZlNILXphZjRjY1dmTlNPbm91YmZUbnItVW5kMHVuY3ctWF9KNUJVdWciLCJ0eXAiOiJKV1QifQ.eyJhdWQiOlsiaHR0cHM6Ly9kZW1vLm51Y2x5ZGUuaW8iXSwiY2xpZW50X2lkIjoiZmx5dGVjdGwiLCJleHAiOjE2MTk1Mjk5MjcsImZvcm0iOnsiY29kZV9jaGFsbGVuZ2UiOiJ2bWNxazArZnJRS3Vvb2FMUHZwUDJCeUtod2VKR2VaeG1mdGtkMml0T042Tk13SVBQNWwySmNpWDd3NTdlaS9iVW1LTWhPSjJVUERnK0F5RXRaTG94SFJiMDl1cWRKSSIsImNvZGVfY2hhbGxlbmdlX21ldGhvZCI6IlN2WEgyeDh2UDUrSkJxQ0NjT2dCL0hNWjdLSmE3bkdLMDBaUVA0ekd4WGcifSwiaWF0IjoxNjE5NTAyNTM1LCJpc3MiOiJodHRwczovL2RlbW8ubnVjbHlkZS5pbyIsImp0aSI6IjQzMTM1ZWY2LTA5NjEtNGFlZC1hOTYxLWQyZGI1YWJmM2U1YyIsInNjcCI6WyJvZmZsaW5lIiwiYWxsIiwiYWNjZXNzX3Rva2VuIl0sInN1YiI6IjExNDUyNzgxNTMwNTEyODk3NDQ3MCIsInVzZXJfaW5mbyI6eyJmYW1pbHlfbmFtZSI6Ik1haGluZHJha2FyIiwiZ2l2ZW5fbmFtZSI6IlByYWZ1bGxhIiwibmFtZSI6IlByYWZ1bGxhIE1haGluZHJha2FyIiwicGljdHVyZSI6Imh0dHBzOi8vbGgzLmdvb2dsZXVzZXJjb250ZW50LmNvbS9hLS9BT2gxNEdqdVQxazgtOGE1dkJHT0lGMWFEZ2hZbUZ4OGhEOUtOaVI1am5adT1zOTYtYyIsInN1YmplY3QiOiIxMTQ1Mjc4MTUzMDUxMjg5NzQ0NzAifX0.ojbUOy2tF6HL8fIp1FJAQchU2MimlVMr3EGVPxMvYyahpW5YsWh6mz7qn4vpEnBuYZDf6cTaN50pJ8krlDX9RqtxF3iEfV2ZYHwyKMThI9sWh_kEBgGwUpyHyk98ZeqQX1uFOH3iwwhR-lPPUlpgdFGzKsxfxeFLOtu1y0V7BgA08KFqgYzl0lJqDYWBkJh_wUAv5g_r0NzSQCsMqb-B3Lno5ScMnlA3SZ_Hg-XdW8hnFIlrwJj4Cv47j3fcZxpqLbTNDXWWogmRbJb3YPlgn_LEnRAyZnFERHKMCE9vaBSTu-1Qstp-gRTORjyV7l3y680dEygQS-99KV3OSBlz6g", - "token_type":"bearer", - "refresh_token":"eyJhbGciOiJSUzI1NiIsImtleV9pZCI6IjlLZlNILXphZjRjY1dmTlNPbm91YmZUbnItVW5kMHVuY3ctWF9KNUJVdWciLCJ0eXAiOiJKV1QifQ.eyJhdWQiOlsiaHR0cHM6Ly9kZW1vLm51Y2x5ZGUuaW8iXSwiY2xpZW50X2lkIjoiZmx5dGVjdGwiLCJleHAiOjE2MTk1MzM1MjcsImZvcm0iOnsiY29kZV9jaGFsbGVuZ2UiOiJ2bWNxazArZnJRS3Vvb2FMUHZwUDJCeUtod2VKR2VaeG1mdGtkMml0T042Tk13SVBQNWwySmNpWDd3NTdlaS9iVW1LTWhPSjJVUERnK0F5RXRaTG94SFJiMDl1cWRKSSIsImNvZGVfY2hhbGxlbmdlX21ldGhvZCI6IlN2WEgyeDh2UDUrSkJxQ0NjT2dCL0hNWjdLSmE3bkdLMDBaUVA0ekd4WGcifSwiaWF0IjoxNjE5NTAyNTM1LCJpc3MiOiJodHRwczovL2RlbW8ubnVjbHlkZS5pbyIsImp0aSI6IjQzMTM1ZWY2LTA5NjEtNGFlZC1hOTYxLWQyZGI1YWJmM2U1YyIsInNjcCI6WyJvZmZsaW5lIiwiZi5hbGwiLCJhY2Nlc3NfdG9rZW4iXSwic3ViIjoiMTE0NTI3ODE1MzA1MTI4OTc0NDcwIiwidXNlcl9pbmZvIjp7ImZhbWlseV9uYW1lIjoiTWFoaW5kcmFrYXIiLCJnaXZlbl9uYW1lIjoiUHJhZnVsbGEiLCJuYW1lIjoiUHJhZnVsbGEgTWFoaW5kcmFrYXIiLCJwaWN0dXJlIjoiaHR0cHM6Ly9saDMuZ29vZ2xldXNlcmNvbnRlbnQuY29tL2EtL0FPaDE0R2p1VDFrOC04YTV2QkdPSUYxYURnaFltRng4aEQ5S05pUjVqblp1PXM5Ni1jIiwic3ViamVjdCI6IjExNDUyNzgxNTMwNTEyODk3NDQ3MCJ9fQ.YKom5-gE4e84rJJIfxcpbMzgjZT33UZ27UTa1y8pK2BAWaPjIZtwudwDHQ5Rd3m0mJJWhBp0j0e8h9DvzBUdpsnGMXSCYKP-ag9y9k5OW59FMm9RqIakWHtj6NPnxGO1jAsaNCYePj8knR7pBLCLCse2taDHUJ8RU1F0DeHNr2y-JupgG5y1vjBcb-9eD8OwOSTp686_hm7XoJlxiKx8dj2O7HPH7M2pAHA_0bVrKKj7Y_s3fRhkm_Aq6LRdA-IiTl9xJQxgVUreejls9-RR9mSTKj6A81-Isz3qAUttVVaA4OT5OdW879_yT7OSLw_QwpXzNZ7qOR7OIpmL_xZXig", - "expiry":"2021-04-27T19:55:26.658635+05:30" -} \ No newline at end of file diff --git a/flyteidl/clients/go/admin/utils/test_utils.go b/flyteidl/clients/go/admin/utils/test_utils.go new file mode 100644 index 0000000000..000bbbebba --- /dev/null +++ b/flyteidl/clients/go/admin/utils/test_utils.go @@ -0,0 +1,24 @@ +package utils + +import ( + 
"testing" + "time" + + "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "golang.org/x/oauth2" +) + +func GenTokenWithCustomExpiry(t *testing.T, expiry time.Time) *oauth2.Token { + var signingKey = []byte("your_secret_key") + token := jwt.New(jwt.SigningMethodHS256) + claims := token.Claims.(jwt.MapClaims) + claims["exp"] = expiry.Unix() + tokenString, err := token.SignedString(signingKey) + assert.NoError(t, err) + return &oauth2.Token{ + AccessToken: tokenString, + Expiry: expiry, + TokenType: "bearer", + } +} diff --git a/flyteidl/clients/go/admin/utils/token_utils.go b/flyteidl/clients/go/admin/utils/token_utils.go new file mode 100644 index 0000000000..8c34cef00e --- /dev/null +++ b/flyteidl/clients/go/admin/utils/token_utils.go @@ -0,0 +1,52 @@ +package utils + +import ( + "context" + "fmt" + "time" + + "github.com/golang-jwt/jwt/v5" + "golang.org/x/oauth2" + + "github.com/flyteorg/flyte/flytestdlib/logger" +) + +// Ref : Taken from oAuth library implementation of expiry +// defaultExpiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const defaultExpiryDelta = 10 * time.Second + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func Valid(t *oauth2.Token) bool { + if t == nil || t.AccessToken == "" { + return false + } + expiryDelta := defaultExpiryDelta + tokenExpiry, err := parseDateClaim(t.AccessToken) + if err != nil { + logger.Errorf(context.Background(), "parseDateClaim failed due to %v", err) + return false + } + logger.Debugf(context.Background(), "Token expiry : %v, Access token expiry : %v, Are the equal : %v", t.Expiry, tokenExpiry, tokenExpiry.Equal(t.Expiry)) + return !tokenExpiry.Add(-expiryDelta).Before(time.Now()) +} + +// parseDateClaim parses the JWT token string and extracts the expiration time +func parseDateClaim(tokenString string) (time.Time, error) { + // Parse the token + token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{}) + if err != nil { + return time.Time{}, err + } + + // Extract the claims + if claims, ok := token.Claims.(jwt.MapClaims); ok { + // Get the expiration time + if exp, ok := claims["exp"].(float64); ok { + return time.Unix(int64(exp), 0), nil + } + } + + return time.Time{}, fmt.Errorf("no expiration claim found in token") +} diff --git a/flyteidl/clients/go/assets/admin.swagger.json b/flyteidl/clients/go/assets/admin.swagger.json index 241baeb53c..01ae020a09 100644 --- a/flyteidl/clients/go/assets/admin.swagger.json +++ b/flyteidl/clients/go/assets/admin.swagger.json @@ -5705,6 +5705,10 @@ "is_array": { "type": "boolean", "description": "Boolean flag indicating if the node is an array node. This is intended to uniquely identify\narray nodes from other nodes which can have is_parent_node as true." + }, + "is_eager": { + "type": "boolean", + "description": "Whether this node is an eager node." } }, "title": "Represents additional attributes related to a Node Execution" @@ -6560,6 +6564,10 @@ "execution_mode": { "$ref": "#/definitions/coreArrayNodeExecutionMode", "description": "execution_mode determines the execution path for ArrayNode." + }, + "is_original_sub_node_interface": { + "type": "boolean", + "title": "Indicates whether the sub node's original interface was altered" } }, "description": "ArrayNode is a Flyte node type that simplifies the execution of a sub-node over a list of input\nvalues. 
An ArrayNode can be executed with configurable parallelism (separate from the parent\nworkflow) and can be configured to succeed when a certain number of sub-nodes succeed." @@ -7153,6 +7161,15 @@ }, "kind": { "$ref": "#/definitions/ExecutionErrorErrorKind" + }, + "timestamp": { + "type": "string", + "format": "date-time", + "title": "Timestamp of the error" + }, + "worker": { + "type": "string", + "title": "Worker that generated the error" } }, "description": "Represents the error message from the execution." @@ -8260,6 +8277,10 @@ "type": "string" }, "description": "cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache." + }, + "is_eager": { + "type": "boolean", + "description": "is_eager indicates whether the task is eager or not.\nThis would be used by CreateTask endpoint." } }, "title": "Task Metadata" @@ -8755,6 +8776,10 @@ "is_in_dynamic_chain": { "type": "boolean", "description": "Tasks and subworkflows (but not launch plans) that are run within a dynamic task are effectively independent of\nthe tasks that are registered in Admin's db. Confusingly, they are often identical, but sometimes they are not\neven registered at all. Similar to the target_entity field, at the time Admin receives this event, it has no idea\nif the relevant execution entity is was registered, or dynamic. This field indicates that the target_entity ID,\nas well as task IDs in any corresponding Task Executions, should not be used to looked up the task in Admin's db." + }, + "is_eager": { + "type": "boolean", + "description": "Whether this node launched an eager task." } } }, diff --git a/flyteidl/clients/go/coreutils/extract_literal.go b/flyteidl/clients/go/coreutils/extract_literal.go index 23302de9a3..08e534c5b7 100644 --- a/flyteidl/clients/go/coreutils/extract_literal.go +++ b/flyteidl/clients/go/coreutils/extract_literal.go @@ -28,11 +28,11 @@ import ( ) func ExtractFromLiteral(literal *core.Literal) (interface{}, error) { - switch literalValue := literal.Value.(type) { + switch literalValue := literal.GetValue().(type) { case *core.Literal_Scalar: - switch scalarValue := literalValue.Scalar.Value.(type) { + switch scalarValue := literalValue.Scalar.GetValue().(type) { case *core.Scalar_Primitive: - switch scalarPrimitive := scalarValue.Primitive.Value.(type) { + switch scalarPrimitive := scalarValue.Primitive.GetValue().(type) { case *core.Primitive_Integer: scalarPrimitiveInt := scalarPrimitive.Integer return scalarPrimitiveInt, nil @@ -57,16 +57,16 @@ func ExtractFromLiteral(literal *core.Literal) (interface{}, error) { case *core.Scalar_Binary: return scalarValue.Binary, nil case *core.Scalar_Blob: - return scalarValue.Blob.Uri, nil + return scalarValue.Blob.GetUri(), nil case *core.Scalar_Schema: - return scalarValue.Schema.Uri, nil + return scalarValue.Schema.GetUri(), nil case *core.Scalar_Generic: return scalarValue.Generic, nil case *core.Scalar_StructuredDataset: - return scalarValue.StructuredDataset.Uri, nil + return scalarValue.StructuredDataset.GetUri(), nil case *core.Scalar_Union: // extract the value of the union but not the actual union object - extractedVal, err := ExtractFromLiteral(scalarValue.Union.Value) + extractedVal, err := ExtractFromLiteral(scalarValue.Union.GetValue()) if err != nil { return nil, err } @@ -77,7 +77,7 @@ func ExtractFromLiteral(literal *core.Literal) (interface{}, error) { return nil, fmt.Errorf("unsupported literal scalar type %T", scalarValue) } case *core.Literal_Collection: - collectionValue := 
literalValue.Collection.Literals + collectionValue := literalValue.Collection.GetLiterals() collection := make([]interface{}, len(collectionValue)) for index, val := range collectionValue { if collectionElem, err := ExtractFromLiteral(val); err == nil { @@ -88,7 +88,7 @@ func ExtractFromLiteral(literal *core.Literal) (interface{}, error) { } return collection, nil case *core.Literal_Map: - mapLiteralValue := literalValue.Map.Literals + mapLiteralValue := literalValue.Map.GetLiterals() mapResult := make(map[string]interface{}, len(mapLiteralValue)) for key, val := range mapLiteralValue { if val, err := ExtractFromLiteral(val); err == nil { @@ -100,7 +100,7 @@ func ExtractFromLiteral(literal *core.Literal) (interface{}, error) { return mapResult, nil case *core.Literal_OffloadedMetadata: // Return the URI of the offloaded metadata to be used when displaying in flytectl - return literalValue.OffloadedMetadata.Uri, nil + return literalValue.OffloadedMetadata.GetUri(), nil } return nil, fmt.Errorf("unsupported literal type %T", literal) diff --git a/flyteidl/clients/go/coreutils/extract_literal_test.go b/flyteidl/clients/go/coreutils/extract_literal_test.go index 66b20439c2..9d6e035775 100644 --- a/flyteidl/clients/go/coreutils/extract_literal_test.go +++ b/flyteidl/clients/go/coreutils/extract_literal_test.go @@ -4,6 +4,7 @@ package coreutils import ( + "os" "testing" "time" @@ -125,6 +126,7 @@ func TestFetchLiteral(t *testing.T) { }) t.Run("Generic", func(t *testing.T) { + os.Setenv(FlyteUseOldDcFormat, "true") literalVal := map[string]interface{}{ "x": 1, "y": "ystringvalue", @@ -146,10 +148,11 @@ func TestFetchLiteral(t *testing.T) { Fields: fieldsMap, } extractedStructValue := extractedLiteralVal.(*structpb.Struct) - assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields)) - for key, val := range expectedStructVal.Fields { - assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind) + assert.Equal(t, len(expectedStructVal.GetFields()), len(extractedStructValue.GetFields())) + for key, val := range expectedStructVal.GetFields() { + assert.Equal(t, val.GetKind(), extractedStructValue.GetFields()[key].GetKind()) } + os.Unsetenv(FlyteUseOldDcFormat) }) t.Run("Generic Passed As String", func(t *testing.T) { @@ -171,9 +174,9 @@ func TestFetchLiteral(t *testing.T) { Fields: fieldsMap, } extractedStructValue := extractedLiteralVal.(*structpb.Struct) - assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields)) - for key, val := range expectedStructVal.Fields { - assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind) + assert.Equal(t, len(expectedStructVal.GetFields()), len(extractedStructValue.GetFields())) + for key, val := range expectedStructVal.GetFields() { + assert.Equal(t, val.GetKind(), extractedStructValue.GetFields()[key].GetKind()) } }) diff --git a/flyteidl/clients/go/coreutils/literals.go b/flyteidl/clients/go/coreutils/literals.go index 2bb789b423..310e389c73 100644 --- a/flyteidl/clients/go/coreutils/literals.go +++ b/flyteidl/clients/go/coreutils/literals.go @@ -5,20 +5,24 @@ import ( "encoding/json" "fmt" "math" + "os" "reflect" "strconv" "strings" "time" - "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" - "github.com/flyteorg/flyte/flytestdlib/storage" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/ptypes" structpb "github.com/golang/protobuf/ptypes/struct" "github.com/pkg/errors" + "github.com/shamaton/msgpack/v2" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + 
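
The accessor changes in this and the following hunks (.Uri to GetUri(), .Literals to GetLiterals(), and so on) lean on the fact that protoc-generated getters are nil-safe, whereas direct field access panics on a nil message. A tiny illustration of that property, using core.Blob purely as an example type and not taken from the diff:

package main

import (
	"fmt"

	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"
)

func main() {
	// Generated getters check the receiver for nil and return the zero value,
	// so chained access on possibly-nil messages cannot panic.
	var blob *core.Blob
	fmt.Println(blob.GetUri() == "") // true; blob.Uri would nil-pointer panic
}
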
"github.com/flyteorg/flyte/flytestdlib/storage" ) const MESSAGEPACK = "msgpack" +const FlyteUseOldDcFormat = "FLYTE_USE_OLD_DC_FORMAT" func MakePrimitive(v interface{}) (*core.Primitive, error) { switch p := v.(type) { @@ -302,20 +306,20 @@ func MakeDefaultLiteralForType(typ *core.LiteralType) (*core.Literal, error) { case *core.LiteralType_Schema: return MakeLiteralForType(typ, nil) case *core.LiteralType_UnionType: - if len(t.UnionType.Variants) == 0 { + if len(t.UnionType.GetVariants()) == 0 { return nil, errors.Errorf("Union type must have at least one variant") } // For union types, we just return the default for the first variant - val, err := MakeDefaultLiteralForType(t.UnionType.Variants[0]) + val, err := MakeDefaultLiteralForType(t.UnionType.GetVariants()[0]) if err != nil { - return nil, errors.Errorf("Failed to create default literal for first union type variant [%v]", t.UnionType.Variants[0]) + return nil, errors.Errorf("Failed to create default literal for first union type variant [%v]", t.UnionType.GetVariants()[0]) } res := &core.Literal{ Value: &core.Literal_Scalar{ Scalar: &core.Scalar{ Value: &core.Scalar_Union{ Union: &core.Union{ - Type: t.UnionType.Variants[0], + Type: t.UnionType.GetVariants()[0], Value: val, }, }, @@ -507,7 +511,7 @@ func MakeLiteralForBlob(path storage.DataReference, isDir bool, format string) * func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, error) { l := &core.Literal{} - switch newT := t.Type.(type) { + switch newT := t.GetType().(type) { case *core.LiteralType_MapValueType: newV, ok := v.(map[string]interface{}) if !ok { @@ -561,12 +565,32 @@ func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, erro strValue = fmt.Sprintf("%.0f", math.Trunc(f)) } if newT.Simple == core.SimpleType_STRUCT { + useOldFormat := strings.ToLower(os.Getenv(FlyteUseOldDcFormat)) if _, isValueStringType := v.(string); !isValueStringType { - byteValue, err := json.Marshal(v) - if err != nil { - return nil, fmt.Errorf("unable to marshal to json string for struct value %v", v) + if useOldFormat == "1" || useOldFormat == "t" || useOldFormat == "true" { + byteValue, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("unable to marshal to json string for struct value %v", v) + } + strValue = string(byteValue) + } else { + byteValue, err := msgpack.Marshal(v) + if err != nil { + return nil, fmt.Errorf("unable to marshal to msgpack bytes for struct value %v", v) + } + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: byteValue, + Tag: MESSAGEPACK, + }, + }, + }, + }, + }, nil } - strValue = string(byteValue) } } lv, err := MakeLiteralForSimpleType(newT.Simple, strValue) @@ -576,24 +600,24 @@ func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, erro return lv, nil case *core.LiteralType_Blob: - isDir := newT.Blob.Dimensionality == core.BlobType_MULTIPART - lv := MakeLiteralForBlob(storage.DataReference(fmt.Sprintf("%v", v)), isDir, newT.Blob.Format) + isDir := newT.Blob.GetDimensionality() == core.BlobType_MULTIPART + lv := MakeLiteralForBlob(storage.DataReference(fmt.Sprintf("%v", v)), isDir, newT.Blob.GetFormat()) return lv, nil case *core.LiteralType_Schema: - lv := MakeLiteralForSchema(storage.DataReference(fmt.Sprintf("%v", v)), newT.Schema.Columns) + lv := MakeLiteralForSchema(storage.DataReference(fmt.Sprintf("%v", v)), newT.Schema.GetColumns()) return lv, nil case 
*core.LiteralType_StructuredDatasetType: - lv := MakeLiteralForStructuredDataSet(storage.DataReference(fmt.Sprintf("%v", v)), newT.StructuredDatasetType.Columns, newT.StructuredDatasetType.Format) + lv := MakeLiteralForStructuredDataSet(storage.DataReference(fmt.Sprintf("%v", v)), newT.StructuredDatasetType.GetColumns(), newT.StructuredDatasetType.GetFormat()) return lv, nil case *core.LiteralType_EnumType: var newV string if v == nil { - if len(t.GetEnumType().Values) == 0 { + if len(t.GetEnumType().GetValues()) == 0 { return nil, fmt.Errorf("enum types need at least one value") } - newV = t.GetEnumType().Values[0] + newV = t.GetEnumType().GetValues()[0] } else { var ok bool newV, ok = v.(string) @@ -616,7 +640,7 @@ func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, erro case *core.LiteralType_UnionType: // Try different types in the variants, return the first one matched found := false - for _, subType := range newT.UnionType.Variants { + for _, subType := range newT.UnionType.GetVariants() { lv, err := MakeLiteralForType(subType, v) if err == nil { l = &core.Literal{ @@ -636,7 +660,7 @@ func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, erro } } if !found { - return nil, fmt.Errorf("incorrect union value [%s], supported values %+v", v, newT.UnionType.Variants) + return nil, fmt.Errorf("incorrect union value [%s], supported values %+v", v, newT.UnionType.GetVariants()) } default: return nil, fmt.Errorf("unsupported type %s", t.String()) diff --git a/flyteidl/clients/go/coreutils/literals_test.go b/flyteidl/clients/go/coreutils/literals_test.go index 3b5daf4b27..3586e0e02f 100644 --- a/flyteidl/clients/go/coreutils/literals_test.go +++ b/flyteidl/clients/go/coreutils/literals_test.go @@ -5,6 +5,7 @@ package coreutils import ( "fmt" + "os" "reflect" "strconv" "testing" @@ -14,6 +15,7 @@ import ( "github.com/golang/protobuf/ptypes" structpb "github.com/golang/protobuf/ptypes/struct" "github.com/pkg/errors" + "github.com/shamaton/msgpack/v2" "github.com/stretchr/testify/assert" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" @@ -25,42 +27,42 @@ func TestMakePrimitive(t *testing.T) { v := 1 p, err := MakePrimitive(v) assert.NoError(t, err) - assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.Value).String()) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetValue()).String()) assert.Equal(t, int64(v), p.GetInteger()) } { v := int64(1) p, err := MakePrimitive(v) assert.NoError(t, err) - assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.Value).String()) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetValue()).String()) assert.Equal(t, v, p.GetInteger()) } { v := 1.0 p, err := MakePrimitive(v) assert.NoError(t, err) - assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.Value).String()) + assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetValue()).String()) assert.Equal(t, v, p.GetFloatValue()) } { v := "blah" p, err := MakePrimitive(v) assert.NoError(t, err) - assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.Value).String()) + assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.GetValue()).String()) assert.Equal(t, v, p.GetStringValue()) } { v := true p, err := MakePrimitive(v) assert.NoError(t, err) - assert.Equal(t, "*core.Primitive_Boolean", reflect.TypeOf(p.Value).String()) + assert.Equal(t, "*core.Primitive_Boolean", reflect.TypeOf(p.GetValue()).String()) assert.Equal(t, v, p.GetBoolean()) } { v := time.Now() p, err := 
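
For readers tracking the behavioral change in MakeLiteralForType above: non-string STRUCT inputs are now packed into a msgpack-tagged Binary scalar by default, and setting FLYTE_USE_OLD_DC_FORMAT to "1", "t", or "true" restores the old Protobuf-Struct path. A rough usage sketch, assuming the coreutils import path implied by this diff and ignoring errors for brevity; it is not part of the diff:

package main

import (
	"fmt"
	"os"

	"github.com/flyteorg/flyte/flyteidl/clients/go/coreutils"
	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"
)

func main() {
	structType := &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}}
	value := map[string]interface{}{"x": int64(1), "y": "hello"}

	// Default behavior after this change: a msgpack-tagged Binary scalar.
	lit, _ := coreutils.MakeLiteralForType(structType, value)
	fmt.Println(lit.GetScalar().GetBinary().GetTag()) // msgpack

	// Opt back into the legacy Protobuf-Struct (JSON) representation.
	os.Setenv(coreutils.FlyteUseOldDcFormat, "true")
	defer os.Unsetenv(coreutils.FlyteUseOldDcFormat)
	legacy, _ := coreutils.MakeLiteralForType(structType, value)
	fmt.Println(legacy.GetScalar().GetGeneric() != nil) // true
}
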
MakePrimitive(v) assert.NoError(t, err) - assert.Equal(t, "*core.Primitive_Datetime", reflect.TypeOf(p.Value).String()) + assert.Equal(t, "*core.Primitive_Datetime", reflect.TypeOf(p.GetValue()).String()) j, err := ptypes.TimestampProto(v) assert.NoError(t, err) assert.Equal(t, j, p.GetDatetime()) @@ -71,7 +73,7 @@ func TestMakePrimitive(t *testing.T) { v := time.Second * 10 p, err := MakePrimitive(v) assert.NoError(t, err) - assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.Value).String()) + assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.GetValue()).String()) assert.Equal(t, ptypes.DurationProto(v), p.GetDuration()) } { @@ -93,7 +95,7 @@ func TestMustMakePrimitive(t *testing.T) { { v := time.Second * 10 p := MustMakePrimitive(v) - assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.Value).String()) + assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.GetValue()).String()) assert.Equal(t, ptypes.DurationProto(v), p.GetDuration()) } } @@ -104,7 +106,7 @@ func TestMakePrimitiveLiteral(t *testing.T) { p, err := MakePrimitiveLiteral(v) assert.NoError(t, err) assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue()) } { @@ -127,7 +129,7 @@ func TestMustMakePrimitiveLiteral(t *testing.T) { v := 1.0 p := MustMakePrimitiveLiteral(v) assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue()) }) } @@ -136,14 +138,14 @@ func TestMakeLiteral(t *testing.T) { t.Run("Primitive", func(t *testing.T) { lit, err := MakeLiteral("test_string") assert.NoError(t, err) - assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(lit.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(lit.GetScalar().GetPrimitive().GetValue()).String()) }) t.Run("Array", func(t *testing.T) { lit, err := MakeLiteral([]interface{}{1, 2, 3}) assert.NoError(t, err) assert.Equal(t, "*core.Literal_Collection", reflect.TypeOf(lit.GetValue()).String()) - assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(lit.GetCollection().Literals[0].GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(lit.GetCollection().GetLiterals()[0].GetScalar().GetPrimitive().GetValue()).String()) }) t.Run("Map", func(t *testing.T) { @@ -153,7 +155,7 @@ func TestMakeLiteral(t *testing.T) { }) assert.NoError(t, err) assert.Equal(t, "*core.Literal_Map", reflect.TypeOf(lit.GetValue()).String()) - assert.Equal(t, "*core.Literal_Collection", reflect.TypeOf(lit.GetMap().Literals["key1"].GetValue()).String()) + assert.Equal(t, "*core.Literal_Collection", reflect.TypeOf(lit.GetMap().GetLiterals()["key1"].GetValue()).String()) }) t.Run("Binary", func(t *testing.T) { @@ -165,7 +167,7 @@ func TestMakeLiteral(t *testing.T) { p, err := MakeLiteral(nil) assert.NoError(t, err) assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Scalar_NoneType", reflect.TypeOf(p.GetScalar().Value).String()) + assert.Equal(t, "*core.Scalar_NoneType", reflect.TypeOf(p.GetScalar().GetValue()).String()) }) } @@ -203,9 
+205,9 @@ func TestMakeDefaultLiteralForType(t *testing.T) { l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Simple{Simple: test.ty}}) assert.NoError(t, err) if test.isPrimitive { - assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().GetPrimitive().GetValue()).String()) } else { - assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().Value).String()) + assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().GetValue()).String()) } }) } @@ -219,7 +221,7 @@ func TestMakeDefaultLiteralForType(t *testing.T) { t.Run("Blob", func(t *testing.T) { l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Blob{}}) assert.NoError(t, err) - assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().Value).String()) + assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().GetValue()).String()) }) t.Run("Collection", func(t *testing.T) { @@ -298,7 +300,7 @@ func TestMustMakeDefaultLiteralForType(t *testing.T) { t.Run("Blob", func(t *testing.T) { l := MustMakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Blob{}}) - assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().Value).String()) + assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().GetValue()).String()) }) } @@ -455,6 +457,7 @@ func TestMakeLiteralForType(t *testing.T) { }) t.Run("Generic", func(t *testing.T) { + os.Setenv(FlyteUseOldDcFormat, "true") literalVal := map[string]interface{}{ "x": 1, "y": "ystringvalue", @@ -476,10 +479,73 @@ func TestMakeLiteralForType(t *testing.T) { Fields: fieldsMap, } extractedStructValue := extractedLiteralVal.(*structpb.Struct) - assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields)) - for key, val := range expectedStructVal.Fields { - assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind) + assert.Equal(t, len(expectedStructVal.GetFields()), len(extractedStructValue.GetFields())) + for key, val := range expectedStructVal.GetFields() { + assert.Equal(t, val.GetKind(), extractedStructValue.GetFields()[key].GetKind()) } + os.Unsetenv(FlyteUseOldDcFormat) + }) + + t.Run("SimpleBinary", func(t *testing.T) { + // We compare the deserialized values instead of the raw msgpack bytes because Go does not guarantee the order + // of map keys during serialization. This means that while the serialized bytes may differ, the deserialized + // values should be logically equivalent. 
+ + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}} + v := map[string]interface{}{ + "a": int64(1), + "b": 3.14, + "c": "example_string", + "d": map[string]interface{}{ + "1": int64(100), + "2": int64(200), + }, + "e": map[string]interface{}{ + "a": int64(1), + "b": 3.14, + }, + "f": []string{"a", "b", "c"}, + } + + val, err := MakeLiteralForType(literalType, v) + assert.NoError(t, err) + + msgpackBytes, err := msgpack.Marshal(v) + assert.NoError(t, err) + + literalVal := &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: msgpackBytes, + Tag: MESSAGEPACK, + }, + }, + }, + }, + } + + expectedLiteralVal, err := ExtractFromLiteral(literalVal) + assert.NoError(t, err) + actualLiteralVal, err := ExtractFromLiteral(val) + assert.NoError(t, err) + + // Check if the extracted value is of type *core.Binary (not []byte) + expectedBinary, ok := expectedLiteralVal.(*core.Binary) + assert.True(t, ok, "expectedLiteralVal is not of type *core.Binary") + actualBinary, ok := actualLiteralVal.(*core.Binary) + assert.True(t, ok, "actualLiteralVal is not of type *core.Binary") + + // Now check if the Binary values match + var expectedVal, actualVal map[string]interface{} + err = msgpack.Unmarshal(expectedBinary.GetValue(), &expectedVal) + assert.NoError(t, err) + err = msgpack.Unmarshal(actualBinary.GetValue(), &actualVal) + assert.NoError(t, err) + + // Finally, assert that the deserialized values are equal + assert.Equal(t, expectedVal, actualVal) }) t.Run("ArrayStrings", func(t *testing.T) { diff --git a/flyteidl/gen/pb-es/flyteidl/admin/agent_pb.ts b/flyteidl/gen/pb-es/flyteidl/admin/agent_pb.ts index f9f6c37564..ff858642dc 100644 --- a/flyteidl/gen/pb-es/flyteidl/admin/agent_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/admin/agent_pb.ts @@ -10,7 +10,7 @@ import { TaskNodeOverrides } from "../core/workflow_pb.js"; import { Identity } from "../core/security_pb.js"; import { LiteralMap } from "../core/literals_pb.js"; import { TaskTemplate } from "../core/tasks_pb.js"; -import { TaskExecution_Phase, TaskLog } from "../core/execution_pb.js"; +import { ExecutionError_ErrorKind, TaskExecution_Phase, TaskLog } from "../core/execution_pb.js"; import { ExecutionMetricResult } from "../core/metrics_pb.js"; /** @@ -637,6 +637,13 @@ export class Resource extends Message { */ customInfo?: Struct; + /** + * The error raised during execution + * + * @generated from field: flyteidl.admin.AgentError agent_error = 7; + */ + agentError?: AgentError; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -651,6 +658,7 @@ export class Resource extends Message { { no: 4, name: "log_links", kind: "message", T: TaskLog, repeated: true }, { no: 5, name: "phase", kind: "enum", T: proto3.getEnumType(TaskExecution_Phase) }, { no: 6, name: "custom_info", kind: "message", T: Struct }, + { no: 7, name: "agent_error", kind: "message", T: AgentError }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): Resource { @@ -1368,3 +1376,83 @@ export class GetTaskLogsResponse extends Message { } } +/** + * Error message to propagate detailed errors from agent executions to the execution + * engine. + * + * @generated from message flyteidl.admin.AgentError + */ +export class AgentError extends Message { + /** + * A simplified code for errors, so that we can provide a glossary of all possible errors. 
+ * + * @generated from field: string code = 1; + */ + code = ""; + + /** + * An abstract error kind for this error. Defaults to Non_Recoverable if not specified. + * + * @generated from field: flyteidl.admin.AgentError.Kind kind = 3; + */ + kind = AgentError_Kind.NON_RECOVERABLE; + + /** + * Defines the origin of the error (system, user, unknown). + * + * @generated from field: flyteidl.core.ExecutionError.ErrorKind origin = 4; + */ + origin = ExecutionError_ErrorKind.UNKNOWN; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "flyteidl.admin.AgentError"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "code", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 3, name: "kind", kind: "enum", T: proto3.getEnumType(AgentError_Kind) }, + { no: 4, name: "origin", kind: "enum", T: proto3.getEnumType(ExecutionError_ErrorKind) }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): AgentError { + return new AgentError().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): AgentError { + return new AgentError().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): AgentError { + return new AgentError().fromJsonString(jsonString, options); + } + + static equals(a: AgentError | PlainMessage | undefined, b: AgentError | PlainMessage | undefined): boolean { + return proto3.util.equals(AgentError, a, b); + } +} + +/** + * Defines a generic error type that dictates the behavior of the retry strategy. + * + * @generated from enum flyteidl.admin.AgentError.Kind + */ +export enum AgentError_Kind { + /** + * @generated from enum value: NON_RECOVERABLE = 0; + */ + NON_RECOVERABLE = 0, + + /** + * @generated from enum value: RECOVERABLE = 1; + */ + RECOVERABLE = 1, +} +// Retrieve enum metadata with: proto3.getEnumType(AgentError_Kind) +proto3.util.setEnumType(AgentError_Kind, "flyteidl.admin.AgentError.Kind", [ + { no: 0, name: "NON_RECOVERABLE" }, + { no: 1, name: "RECOVERABLE" }, +]); + diff --git a/flyteidl/gen/pb-es/flyteidl/admin/node_execution_pb.ts b/flyteidl/gen/pb-es/flyteidl/admin/node_execution_pb.ts index 97b89426fe..6b6f8aa022 100644 --- a/flyteidl/gen/pb-es/flyteidl/admin/node_execution_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/admin/node_execution_pb.ts @@ -337,6 +337,13 @@ export class NodeExecutionMetaData extends Message { */ isArray = false; + /** + * Whether this node is an eager node. 
+ * + * @generated from field: bool is_eager = 6; + */ + isEager = false; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -350,6 +357,7 @@ export class NodeExecutionMetaData extends Message { { no: 3, name: "spec_node_id", kind: "scalar", T: 9 /* ScalarType.STRING */ }, { no: 4, name: "is_dynamic", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, { no: 5, name: "is_array", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + { no: 6, name: "is_eager", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): NodeExecutionMetaData { diff --git a/flyteidl/gen/pb-es/flyteidl/core/tasks_pb.ts b/flyteidl/gen/pb-es/flyteidl/core/tasks_pb.ts index 5cc011314c..eb2156cce7 100644 --- a/flyteidl/gen/pb-es/flyteidl/core/tasks_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/core/tasks_pb.ts @@ -447,6 +447,14 @@ export class TaskMetadata extends Message { */ cacheIgnoreInputVars: string[] = []; + /** + * is_eager indicates whether the task is eager or not. + * This would be used by CreateTask endpoint. + * + * @generated from field: bool is_eager = 14; + */ + isEager = false; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -467,6 +475,7 @@ export class TaskMetadata extends Message { { no: 11, name: "tags", kind: "map", K: 9 /* ScalarType.STRING */, V: {kind: "scalar", T: 9 /* ScalarType.STRING */} }, { no: 12, name: "pod_template_name", kind: "scalar", T: 9 /* ScalarType.STRING */ }, { no: 13, name: "cache_ignore_input_vars", kind: "scalar", T: 9 /* ScalarType.STRING */, repeated: true }, + { no: 14, name: "is_eager", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): TaskMetadata { diff --git a/flyteidl/gen/pb-es/flyteidl/core/workflow_pb.ts b/flyteidl/gen/pb-es/flyteidl/core/workflow_pb.ts index 20c235c187..7dce890e0d 100644 --- a/flyteidl/gen/pb-es/flyteidl/core/workflow_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/core/workflow_pb.ts @@ -4,7 +4,7 @@ // @ts-nocheck import type { BinaryReadOptions, FieldList, JsonReadOptions, JsonValue, PartialMessage, PlainMessage } from "@bufbuild/protobuf"; -import { Duration, Message, proto3 } from "@bufbuild/protobuf"; +import { BoolValue, Duration, Message, proto3 } from "@bufbuild/protobuf"; import { BooleanExpression } from "./condition_pb.js"; import { Error, LiteralType } from "./types_pb.js"; import { Identifier } from "./identifier_pb.js"; @@ -554,6 +554,13 @@ export class ArrayNode extends Message { */ executionMode = ArrayNode_ExecutionMode.MINIMAL_STATE; + /** + * Indicates whether the sub node's original interface was altered + * + * @generated from field: google.protobuf.BoolValue is_original_sub_node_interface = 6; + */ + isOriginalSubNodeInterface?: boolean; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -567,6 +574,7 @@ export class ArrayNode extends Message { { no: 3, name: "min_successes", kind: "scalar", T: 13 /* ScalarType.UINT32 */, oneof: "success_criteria" }, { no: 4, name: "min_success_ratio", kind: "scalar", T: 2 /* ScalarType.FLOAT */, oneof: "success_criteria" }, { no: 5, name: "execution_mode", kind: "enum", T: proto3.getEnumType(ArrayNode_ExecutionMode) }, + { no: 6, name: "is_original_sub_node_interface", kind: "message", T: BoolValue }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): ArrayNode { diff --git a/flyteidl/gen/pb-es/flyteidl/event/event_pb.ts b/flyteidl/gen/pb-es/flyteidl/event/event_pb.ts index 
9e5fd39c1d..7cc67bfc62 100644 --- a/flyteidl/gen/pb-es/flyteidl/event/event_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/event/event_pb.ts @@ -307,6 +307,13 @@ export class NodeExecutionEvent extends Message { */ isInDynamicChain = false; + /** + * Whether this node launched an eager task. + * + * @generated from field: bool is_eager = 25; + */ + isEager = false; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -339,6 +346,7 @@ export class NodeExecutionEvent extends Message { { no: 22, name: "is_array", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, { no: 23, name: "target_entity", kind: "message", T: Identifier }, { no: 24, name: "is_in_dynamic_chain", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + { no: 25, name: "is_eager", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): NodeExecutionEvent { diff --git a/flyteidl/gen/pb-es/flyteidl/plugins/ray_pb.ts b/flyteidl/gen/pb-es/flyteidl/plugins/ray_pb.ts index 10877bcb42..0c0a33e0a1 100644 --- a/flyteidl/gen/pb-es/flyteidl/plugins/ray_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/plugins/ray_pb.ts @@ -5,6 +5,7 @@ import type { BinaryReadOptions, FieldList, JsonReadOptions, JsonValue, PartialMessage, PlainMessage } from "@bufbuild/protobuf"; import { Message, proto3 } from "@bufbuild/protobuf"; +import { K8sPod } from "../core/tasks_pb.js"; /** * RayJobSpec defines the desired state of RayJob @@ -153,6 +154,13 @@ export class HeadGroupSpec extends Message { */ rayStartParams: { [key: string]: string } = {}; + /** + * Pod Spec for the ray head pod + * + * @generated from field: flyteidl.core.K8sPod k8s_pod = 2; + */ + k8sPod?: K8sPod; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -162,6 +170,7 @@ export class HeadGroupSpec extends Message { static readonly typeName = "flyteidl.plugins.HeadGroupSpec"; static readonly fields: FieldList = proto3.util.newFieldList(() => [ { no: 1, name: "ray_start_params", kind: "map", K: 9 /* ScalarType.STRING */, V: {kind: "scalar", T: 9 /* ScalarType.STRING */} }, + { no: 2, name: "k8s_pod", kind: "message", T: K8sPod }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): HeadGroupSpec { @@ -223,6 +232,13 @@ export class WorkerGroupSpec extends Message { */ rayStartParams: { [key: string]: string } = {}; + /** + * Pod Spec for ray worker pods + * + * @generated from field: flyteidl.core.K8sPod k8s_pod = 6; + */ + k8sPod?: K8sPod; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -236,6 +252,7 @@ export class WorkerGroupSpec extends Message { { no: 3, name: "min_replicas", kind: "scalar", T: 5 /* ScalarType.INT32 */ }, { no: 4, name: "max_replicas", kind: "scalar", T: 5 /* ScalarType.INT32 */ }, { no: 5, name: "ray_start_params", kind: "map", K: 9 /* ScalarType.STRING */, V: {kind: "scalar", T: 9 /* ScalarType.STRING */} }, + { no: 6, name: "k8s_pod", kind: "message", T: K8sPod }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): WorkerGroupSpec { diff --git a/flyteidl/gen/pb-go/flyteidl/admin/agent.pb.go b/flyteidl/gen/pb-go/flyteidl/admin/agent.pb.go index 653fce6266..a49ae30acb 100644 --- a/flyteidl/gen/pb-go/flyteidl/admin/agent.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/admin/agent.pb.go @@ -82,6 +82,53 @@ func (State) EnumDescriptor() ([]byte, []int) { return file_flyteidl_admin_agent_proto_rawDescGZIP(), []int{0} } +// Defines a generic error type that dictates the behavior of the retry strategy. 
+type AgentError_Kind int32 + +const ( + AgentError_NON_RECOVERABLE AgentError_Kind = 0 + AgentError_RECOVERABLE AgentError_Kind = 1 +) + +// Enum value maps for AgentError_Kind. +var ( + AgentError_Kind_name = map[int32]string{ + 0: "NON_RECOVERABLE", + 1: "RECOVERABLE", + } + AgentError_Kind_value = map[string]int32{ + "NON_RECOVERABLE": 0, + "RECOVERABLE": 1, + } +) + +func (x AgentError_Kind) Enum() *AgentError_Kind { + p := new(AgentError_Kind) + *p = x + return p +} + +func (x AgentError_Kind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AgentError_Kind) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl_admin_agent_proto_enumTypes[1].Descriptor() +} + +func (AgentError_Kind) Type() protoreflect.EnumType { + return &file_flyteidl_admin_agent_proto_enumTypes[1] +} + +func (x AgentError_Kind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AgentError_Kind.Descriptor instead. +func (AgentError_Kind) EnumDescriptor() ([]byte, []int) { + return file_flyteidl_admin_agent_proto_rawDescGZIP(), []int{24, 0} +} + // Represents a subset of runtime task execution metadata that are relevant to external plugins. type TaskExecutionMetadata struct { state protoimpl.MessageState @@ -778,6 +825,8 @@ type Resource struct { Phase core.TaskExecution_Phase `protobuf:"varint,5,opt,name=phase,proto3,enum=flyteidl.core.TaskExecution_Phase" json:"phase,omitempty"` // Custom data specific to the agent. CustomInfo *structpb.Struct `protobuf:"bytes,6,opt,name=custom_info,json=customInfo,proto3" json:"custom_info,omitempty"` + // The error raised during execution + AgentError *AgentError `protobuf:"bytes,7,opt,name=agent_error,json=agentError,proto3" json:"agent_error,omitempty"` } func (x *Resource) Reset() { @@ -855,6 +904,13 @@ func (x *Resource) GetCustomInfo() *structpb.Struct { return nil } +func (x *Resource) GetAgentError() *AgentError { + if x != nil { + return x.AgentError + } + return nil +} + // A message used to delete a task. type DeleteTaskRequest struct { state protoimpl.MessageState @@ -1712,6 +1768,74 @@ func (*GetTaskLogsResponse_Header) isGetTaskLogsResponse_Part() {} func (*GetTaskLogsResponse_Body) isGetTaskLogsResponse_Part() {} +// Error message to propagate detailed errors from agent executions to the execution +// engine. +type AgentError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A simplified code for errors, so that we can provide a glossary of all possible errors. + Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` + // An abstract error kind for this error. Defaults to Non_Recoverable if not specified. + Kind AgentError_Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=flyteidl.admin.AgentError_Kind" json:"kind,omitempty"` + // Defines the origin of the error (system, user, unknown). 
+ Origin core.ExecutionError_ErrorKind `protobuf:"varint,4,opt,name=origin,proto3,enum=flyteidl.core.ExecutionError_ErrorKind" json:"origin,omitempty"` +} + +func (x *AgentError) Reset() { + *x = AgentError{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl_admin_agent_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentError) ProtoMessage() {} + +func (x *AgentError) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl_admin_agent_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentError.ProtoReflect.Descriptor instead. +func (*AgentError) Descriptor() ([]byte, []int) { + return file_flyteidl_admin_agent_proto_rawDescGZIP(), []int{24} +} + +func (x *AgentError) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +func (x *AgentError) GetKind() AgentError_Kind { + if x != nil { + return x.Kind + } + return AgentError_NON_RECOVERABLE +} + +func (x *AgentError) GetOrigin() core.ExecutionError_ErrorKind { + if x != nil { + return x.Origin + } + return core.ExecutionError_ErrorKind(0) +} + var File_flyteidl_admin_agent_proto protoreflect.FileDescriptor var file_flyteidl_admin_agent_proto_rawDesc = []byte{ @@ -1870,7 +1994,7 @@ var file_flyteidl_admin_agent_proto_rawDesc = []byte{ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xb3, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xf0, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, @@ -1890,122 +2014,138 @@ var file_flyteidl_admin_agent_proto_rawDesc = []byte{ 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x9c, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x41, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 
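
To make the new AgentError plumbing concrete, here is a rough sketch of the kind of Resource an agent could now return; it is not part of the generated code above, the error code string is made up for illustration, and core.TaskExecution_FAILED / core.ExecutionError_SYSTEM are assumed to be the pre-existing generated enum constants:

package main

import (
	"fmt"

	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"
	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"
)

func main() {
	// A failed attempt that the agent flags as retryable via AgentError.Kind.
	res := &admin.Resource{
		Phase: core.TaskExecution_FAILED,
		AgentError: &admin.AgentError{
			Code:   "EXAMPLE_BACKEND_THROTTLED", // illustrative only
			Kind:   admin.AgentError_RECOVERABLE,
			Origin: core.ExecutionError_SYSTEM,
		},
	}
	fmt.Println(res.GetAgentError().GetKind()) // RECOVERABLE
}
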
0x69, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, - 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, - 0x6f, 0x72, 0x79, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc4, 0x01, 0x0a, 0x05, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x14, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x12, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x17, 0x0a, - 0x07, 0x69, 0x73, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x69, 0x73, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x58, 0x0a, 0x19, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, - 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, - 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x17, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, - 0x22, 0x3c, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3b, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x9c, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, + 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x41, 0x0a, + 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, + 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, + 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc4, 0x01, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x25, - 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, - 0x05, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x43, 0x0a, 0x12, 0x4c, - 0x69, 0x73, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2d, 0x0a, 0x06, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, - 0x22, 0xdb, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x18, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x04, - 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x12, 0x41, 0x0a, 0x0d, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, - 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x22, 0x58, - 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 
0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, - 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xc9, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1f, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x41, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, - 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, - 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, - 0x67, 0x6f, 0x72, 0x79, 0x22, 0x31, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, - 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x33, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x61, - 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, - 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xa1, 0x01, 0x0a, - 0x13, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x14, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x12, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, + 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x53, + 0x79, 0x6e, 0x63, 0x12, 0x58, 0x0a, 0x19, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, + 0x67, 0x6f, 0x72, 0x79, 0x52, 0x17, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, + 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x22, 0x3c, 0x0a, + 0x0c, 0x54, 
0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x25, 0x0a, 0x0f, 0x47, + 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x3f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x43, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, + 0x0a, 0x06, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xdb, 0x02, + 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, + 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, + 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x73, 0x74, 0x65, + 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x12, 0x41, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 
0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, + 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x22, 0x58, 0x0a, 0x16, 0x47, + 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xc9, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, + 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x09, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x41, + 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, + 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, + 0x79, 0x22, 0x31, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x33, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, + 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x47, 0x65, + 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, - 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 
0x3d, 0x0a, 0x04, 0x62, 0x6f, 0x64, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, - 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, - 0x48, 0x00, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x42, 0x06, 0x0a, 0x04, 0x70, 0x61, 0x72, 0x74, - 0x2a, 0x62, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, - 0x52, 0x59, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x00, - 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x41, - 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, - 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, - 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, - 0x1a, 0x02, 0x18, 0x01, 0x42, 0xb6, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0x0a, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, - 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, 0x58, 0xaa, 0x02, 0x0e, 0x46, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xca, 0x02, 0x0e, - 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xe2, 0x02, - 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, - 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x48, 0x00, 0x52, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x42, 0x06, 0x0a, 0x04, 0x70, 0x61, 0x72, 0x74, 0x22, 0xc4, 0x01, + 0x0a, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x12, 0x33, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, + 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x3f, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x06, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x22, 0x2c, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x13, + 0x0a, 0x0f, 0x4e, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x41, 0x42, 0x4c, + 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 
0x43, 0x4f, 0x56, 0x45, 0x52, 0x41, 0x42, + 0x4c, 0x45, 0x10, 0x01, 0x2a, 0x62, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, + 0x11, 0x52, 0x45, 0x54, 0x52, 0x59, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, + 0x52, 0x45, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, + 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x50, + 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, + 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, + 0x45, 0x44, 0x10, 0x04, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xb6, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, + 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, + 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, 0x58, + 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2020,90 +2160,96 @@ func file_flyteidl_admin_agent_proto_rawDescGZIP() []byte { return file_flyteidl_admin_agent_proto_rawDescData } -var file_flyteidl_admin_agent_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_flyteidl_admin_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_flyteidl_admin_agent_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_flyteidl_admin_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_flyteidl_admin_agent_proto_goTypes = []interface{}{ (State)(0), // 0: flyteidl.admin.State - (*TaskExecutionMetadata)(nil), // 1: flyteidl.admin.TaskExecutionMetadata - (*CreateTaskRequest)(nil), // 2: flyteidl.admin.CreateTaskRequest - (*CreateTaskResponse)(nil), // 3: flyteidl.admin.CreateTaskResponse - (*CreateRequestHeader)(nil), // 4: flyteidl.admin.CreateRequestHeader - (*ExecuteTaskSyncRequest)(nil), // 5: flyteidl.admin.ExecuteTaskSyncRequest - (*ExecuteTaskSyncResponseHeader)(nil), // 6: flyteidl.admin.ExecuteTaskSyncResponseHeader - (*ExecuteTaskSyncResponse)(nil), // 7: flyteidl.admin.ExecuteTaskSyncResponse - (*GetTaskRequest)(nil), // 8: flyteidl.admin.GetTaskRequest - (*GetTaskResponse)(nil), // 9: flyteidl.admin.GetTaskResponse - (*Resource)(nil), // 10: flyteidl.admin.Resource - (*DeleteTaskRequest)(nil), // 11: flyteidl.admin.DeleteTaskRequest - (*DeleteTaskResponse)(nil), // 12: flyteidl.admin.DeleteTaskResponse - (*Agent)(nil), // 13: flyteidl.admin.Agent - (*TaskCategory)(nil), // 14: flyteidl.admin.TaskCategory - (*GetAgentRequest)(nil), // 15: flyteidl.admin.GetAgentRequest - (*GetAgentResponse)(nil), // 16: flyteidl.admin.GetAgentResponse - (*ListAgentsRequest)(nil), // 17: 
flyteidl.admin.ListAgentsRequest - (*ListAgentsResponse)(nil), // 18: flyteidl.admin.ListAgentsResponse - (*GetTaskMetricsRequest)(nil), // 19: flyteidl.admin.GetTaskMetricsRequest - (*GetTaskMetricsResponse)(nil), // 20: flyteidl.admin.GetTaskMetricsResponse - (*GetTaskLogsRequest)(nil), // 21: flyteidl.admin.GetTaskLogsRequest - (*GetTaskLogsResponseHeader)(nil), // 22: flyteidl.admin.GetTaskLogsResponseHeader - (*GetTaskLogsResponseBody)(nil), // 23: flyteidl.admin.GetTaskLogsResponseBody - (*GetTaskLogsResponse)(nil), // 24: flyteidl.admin.GetTaskLogsResponse - nil, // 25: flyteidl.admin.TaskExecutionMetadata.LabelsEntry - nil, // 26: flyteidl.admin.TaskExecutionMetadata.AnnotationsEntry - nil, // 27: flyteidl.admin.TaskExecutionMetadata.EnvironmentVariablesEntry - (*core.TaskExecutionIdentifier)(nil), // 28: flyteidl.core.TaskExecutionIdentifier - (*core.TaskNodeOverrides)(nil), // 29: flyteidl.core.TaskNodeOverrides - (*core.Identity)(nil), // 30: flyteidl.core.Identity - (*core.LiteralMap)(nil), // 31: flyteidl.core.LiteralMap - (*core.TaskTemplate)(nil), // 32: flyteidl.core.TaskTemplate - (*core.TaskLog)(nil), // 33: flyteidl.core.TaskLog - (core.TaskExecution_Phase)(0), // 34: flyteidl.core.TaskExecution.Phase - (*structpb.Struct)(nil), // 35: google.protobuf.Struct - (*timestamppb.Timestamp)(nil), // 36: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 37: google.protobuf.Duration - (*core.ExecutionMetricResult)(nil), // 38: flyteidl.core.ExecutionMetricResult + (AgentError_Kind)(0), // 1: flyteidl.admin.AgentError.Kind + (*TaskExecutionMetadata)(nil), // 2: flyteidl.admin.TaskExecutionMetadata + (*CreateTaskRequest)(nil), // 3: flyteidl.admin.CreateTaskRequest + (*CreateTaskResponse)(nil), // 4: flyteidl.admin.CreateTaskResponse + (*CreateRequestHeader)(nil), // 5: flyteidl.admin.CreateRequestHeader + (*ExecuteTaskSyncRequest)(nil), // 6: flyteidl.admin.ExecuteTaskSyncRequest + (*ExecuteTaskSyncResponseHeader)(nil), // 7: flyteidl.admin.ExecuteTaskSyncResponseHeader + (*ExecuteTaskSyncResponse)(nil), // 8: flyteidl.admin.ExecuteTaskSyncResponse + (*GetTaskRequest)(nil), // 9: flyteidl.admin.GetTaskRequest + (*GetTaskResponse)(nil), // 10: flyteidl.admin.GetTaskResponse + (*Resource)(nil), // 11: flyteidl.admin.Resource + (*DeleteTaskRequest)(nil), // 12: flyteidl.admin.DeleteTaskRequest + (*DeleteTaskResponse)(nil), // 13: flyteidl.admin.DeleteTaskResponse + (*Agent)(nil), // 14: flyteidl.admin.Agent + (*TaskCategory)(nil), // 15: flyteidl.admin.TaskCategory + (*GetAgentRequest)(nil), // 16: flyteidl.admin.GetAgentRequest + (*GetAgentResponse)(nil), // 17: flyteidl.admin.GetAgentResponse + (*ListAgentsRequest)(nil), // 18: flyteidl.admin.ListAgentsRequest + (*ListAgentsResponse)(nil), // 19: flyteidl.admin.ListAgentsResponse + (*GetTaskMetricsRequest)(nil), // 20: flyteidl.admin.GetTaskMetricsRequest + (*GetTaskMetricsResponse)(nil), // 21: flyteidl.admin.GetTaskMetricsResponse + (*GetTaskLogsRequest)(nil), // 22: flyteidl.admin.GetTaskLogsRequest + (*GetTaskLogsResponseHeader)(nil), // 23: flyteidl.admin.GetTaskLogsResponseHeader + (*GetTaskLogsResponseBody)(nil), // 24: flyteidl.admin.GetTaskLogsResponseBody + (*GetTaskLogsResponse)(nil), // 25: flyteidl.admin.GetTaskLogsResponse + (*AgentError)(nil), // 26: flyteidl.admin.AgentError + nil, // 27: flyteidl.admin.TaskExecutionMetadata.LabelsEntry + nil, // 28: flyteidl.admin.TaskExecutionMetadata.AnnotationsEntry + nil, // 29: flyteidl.admin.TaskExecutionMetadata.EnvironmentVariablesEntry + 
(*core.TaskExecutionIdentifier)(nil), // 30: flyteidl.core.TaskExecutionIdentifier + (*core.TaskNodeOverrides)(nil), // 31: flyteidl.core.TaskNodeOverrides + (*core.Identity)(nil), // 32: flyteidl.core.Identity + (*core.LiteralMap)(nil), // 33: flyteidl.core.LiteralMap + (*core.TaskTemplate)(nil), // 34: flyteidl.core.TaskTemplate + (*core.TaskLog)(nil), // 35: flyteidl.core.TaskLog + (core.TaskExecution_Phase)(0), // 36: flyteidl.core.TaskExecution.Phase + (*structpb.Struct)(nil), // 37: google.protobuf.Struct + (*timestamppb.Timestamp)(nil), // 38: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 39: google.protobuf.Duration + (*core.ExecutionMetricResult)(nil), // 40: flyteidl.core.ExecutionMetricResult + (core.ExecutionError_ErrorKind)(0), // 41: flyteidl.core.ExecutionError.ErrorKind } var file_flyteidl_admin_agent_proto_depIdxs = []int32{ - 28, // 0: flyteidl.admin.TaskExecutionMetadata.task_execution_id:type_name -> flyteidl.core.TaskExecutionIdentifier - 25, // 1: flyteidl.admin.TaskExecutionMetadata.labels:type_name -> flyteidl.admin.TaskExecutionMetadata.LabelsEntry - 26, // 2: flyteidl.admin.TaskExecutionMetadata.annotations:type_name -> flyteidl.admin.TaskExecutionMetadata.AnnotationsEntry - 27, // 3: flyteidl.admin.TaskExecutionMetadata.environment_variables:type_name -> flyteidl.admin.TaskExecutionMetadata.EnvironmentVariablesEntry - 29, // 4: flyteidl.admin.TaskExecutionMetadata.overrides:type_name -> flyteidl.core.TaskNodeOverrides - 30, // 5: flyteidl.admin.TaskExecutionMetadata.identity:type_name -> flyteidl.core.Identity - 31, // 6: flyteidl.admin.CreateTaskRequest.inputs:type_name -> flyteidl.core.LiteralMap - 32, // 7: flyteidl.admin.CreateTaskRequest.template:type_name -> flyteidl.core.TaskTemplate - 1, // 8: flyteidl.admin.CreateTaskRequest.task_execution_metadata:type_name -> flyteidl.admin.TaskExecutionMetadata - 32, // 9: flyteidl.admin.CreateRequestHeader.template:type_name -> flyteidl.core.TaskTemplate - 1, // 10: flyteidl.admin.CreateRequestHeader.task_execution_metadata:type_name -> flyteidl.admin.TaskExecutionMetadata - 4, // 11: flyteidl.admin.ExecuteTaskSyncRequest.header:type_name -> flyteidl.admin.CreateRequestHeader - 31, // 12: flyteidl.admin.ExecuteTaskSyncRequest.inputs:type_name -> flyteidl.core.LiteralMap - 10, // 13: flyteidl.admin.ExecuteTaskSyncResponseHeader.resource:type_name -> flyteidl.admin.Resource - 6, // 14: flyteidl.admin.ExecuteTaskSyncResponse.header:type_name -> flyteidl.admin.ExecuteTaskSyncResponseHeader - 31, // 15: flyteidl.admin.ExecuteTaskSyncResponse.outputs:type_name -> flyteidl.core.LiteralMap - 14, // 16: flyteidl.admin.GetTaskRequest.task_category:type_name -> flyteidl.admin.TaskCategory - 10, // 17: flyteidl.admin.GetTaskResponse.resource:type_name -> flyteidl.admin.Resource + 30, // 0: flyteidl.admin.TaskExecutionMetadata.task_execution_id:type_name -> flyteidl.core.TaskExecutionIdentifier + 27, // 1: flyteidl.admin.TaskExecutionMetadata.labels:type_name -> flyteidl.admin.TaskExecutionMetadata.LabelsEntry + 28, // 2: flyteidl.admin.TaskExecutionMetadata.annotations:type_name -> flyteidl.admin.TaskExecutionMetadata.AnnotationsEntry + 29, // 3: flyteidl.admin.TaskExecutionMetadata.environment_variables:type_name -> flyteidl.admin.TaskExecutionMetadata.EnvironmentVariablesEntry + 31, // 4: flyteidl.admin.TaskExecutionMetadata.overrides:type_name -> flyteidl.core.TaskNodeOverrides + 32, // 5: flyteidl.admin.TaskExecutionMetadata.identity:type_name -> flyteidl.core.Identity + 33, // 6: 
flyteidl.admin.CreateTaskRequest.inputs:type_name -> flyteidl.core.LiteralMap + 34, // 7: flyteidl.admin.CreateTaskRequest.template:type_name -> flyteidl.core.TaskTemplate + 2, // 8: flyteidl.admin.CreateTaskRequest.task_execution_metadata:type_name -> flyteidl.admin.TaskExecutionMetadata + 34, // 9: flyteidl.admin.CreateRequestHeader.template:type_name -> flyteidl.core.TaskTemplate + 2, // 10: flyteidl.admin.CreateRequestHeader.task_execution_metadata:type_name -> flyteidl.admin.TaskExecutionMetadata + 5, // 11: flyteidl.admin.ExecuteTaskSyncRequest.header:type_name -> flyteidl.admin.CreateRequestHeader + 33, // 12: flyteidl.admin.ExecuteTaskSyncRequest.inputs:type_name -> flyteidl.core.LiteralMap + 11, // 13: flyteidl.admin.ExecuteTaskSyncResponseHeader.resource:type_name -> flyteidl.admin.Resource + 7, // 14: flyteidl.admin.ExecuteTaskSyncResponse.header:type_name -> flyteidl.admin.ExecuteTaskSyncResponseHeader + 33, // 15: flyteidl.admin.ExecuteTaskSyncResponse.outputs:type_name -> flyteidl.core.LiteralMap + 15, // 16: flyteidl.admin.GetTaskRequest.task_category:type_name -> flyteidl.admin.TaskCategory + 11, // 17: flyteidl.admin.GetTaskResponse.resource:type_name -> flyteidl.admin.Resource 0, // 18: flyteidl.admin.Resource.state:type_name -> flyteidl.admin.State - 31, // 19: flyteidl.admin.Resource.outputs:type_name -> flyteidl.core.LiteralMap - 33, // 20: flyteidl.admin.Resource.log_links:type_name -> flyteidl.core.TaskLog - 34, // 21: flyteidl.admin.Resource.phase:type_name -> flyteidl.core.TaskExecution.Phase - 35, // 22: flyteidl.admin.Resource.custom_info:type_name -> google.protobuf.Struct - 14, // 23: flyteidl.admin.DeleteTaskRequest.task_category:type_name -> flyteidl.admin.TaskCategory - 14, // 24: flyteidl.admin.Agent.supported_task_categories:type_name -> flyteidl.admin.TaskCategory - 13, // 25: flyteidl.admin.GetAgentResponse.agent:type_name -> flyteidl.admin.Agent - 13, // 26: flyteidl.admin.ListAgentsResponse.agents:type_name -> flyteidl.admin.Agent - 36, // 27: flyteidl.admin.GetTaskMetricsRequest.start_time:type_name -> google.protobuf.Timestamp - 36, // 28: flyteidl.admin.GetTaskMetricsRequest.end_time:type_name -> google.protobuf.Timestamp - 37, // 29: flyteidl.admin.GetTaskMetricsRequest.step:type_name -> google.protobuf.Duration - 14, // 30: flyteidl.admin.GetTaskMetricsRequest.task_category:type_name -> flyteidl.admin.TaskCategory - 38, // 31: flyteidl.admin.GetTaskMetricsResponse.results:type_name -> flyteidl.core.ExecutionMetricResult - 14, // 32: flyteidl.admin.GetTaskLogsRequest.task_category:type_name -> flyteidl.admin.TaskCategory - 22, // 33: flyteidl.admin.GetTaskLogsResponse.header:type_name -> flyteidl.admin.GetTaskLogsResponseHeader - 23, // 34: flyteidl.admin.GetTaskLogsResponse.body:type_name -> flyteidl.admin.GetTaskLogsResponseBody - 35, // [35:35] is the sub-list for method output_type - 35, // [35:35] is the sub-list for method input_type - 35, // [35:35] is the sub-list for extension type_name - 35, // [35:35] is the sub-list for extension extendee - 0, // [0:35] is the sub-list for field type_name + 33, // 19: flyteidl.admin.Resource.outputs:type_name -> flyteidl.core.LiteralMap + 35, // 20: flyteidl.admin.Resource.log_links:type_name -> flyteidl.core.TaskLog + 36, // 21: flyteidl.admin.Resource.phase:type_name -> flyteidl.core.TaskExecution.Phase + 37, // 22: flyteidl.admin.Resource.custom_info:type_name -> google.protobuf.Struct + 26, // 23: flyteidl.admin.Resource.agent_error:type_name -> flyteidl.admin.AgentError + 15, // 24: 
flyteidl.admin.DeleteTaskRequest.task_category:type_name -> flyteidl.admin.TaskCategory + 15, // 25: flyteidl.admin.Agent.supported_task_categories:type_name -> flyteidl.admin.TaskCategory + 14, // 26: flyteidl.admin.GetAgentResponse.agent:type_name -> flyteidl.admin.Agent + 14, // 27: flyteidl.admin.ListAgentsResponse.agents:type_name -> flyteidl.admin.Agent + 38, // 28: flyteidl.admin.GetTaskMetricsRequest.start_time:type_name -> google.protobuf.Timestamp + 38, // 29: flyteidl.admin.GetTaskMetricsRequest.end_time:type_name -> google.protobuf.Timestamp + 39, // 30: flyteidl.admin.GetTaskMetricsRequest.step:type_name -> google.protobuf.Duration + 15, // 31: flyteidl.admin.GetTaskMetricsRequest.task_category:type_name -> flyteidl.admin.TaskCategory + 40, // 32: flyteidl.admin.GetTaskMetricsResponse.results:type_name -> flyteidl.core.ExecutionMetricResult + 15, // 33: flyteidl.admin.GetTaskLogsRequest.task_category:type_name -> flyteidl.admin.TaskCategory + 23, // 34: flyteidl.admin.GetTaskLogsResponse.header:type_name -> flyteidl.admin.GetTaskLogsResponseHeader + 24, // 35: flyteidl.admin.GetTaskLogsResponse.body:type_name -> flyteidl.admin.GetTaskLogsResponseBody + 1, // 36: flyteidl.admin.AgentError.kind:type_name -> flyteidl.admin.AgentError.Kind + 41, // 37: flyteidl.admin.AgentError.origin:type_name -> flyteidl.core.ExecutionError.ErrorKind + 38, // [38:38] is the sub-list for method output_type + 38, // [38:38] is the sub-list for method input_type + 38, // [38:38] is the sub-list for extension type_name + 38, // [38:38] is the sub-list for extension extendee + 0, // [0:38] is the sub-list for field type_name } func init() { file_flyteidl_admin_agent_proto_init() } @@ -2400,6 +2546,18 @@ func file_flyteidl_admin_agent_proto_init() { return nil } } + file_flyteidl_admin_agent_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_flyteidl_admin_agent_proto_msgTypes[4].OneofWrappers = []interface{}{ (*ExecuteTaskSyncRequest_Header)(nil), @@ -2418,8 +2576,8 @@ func file_flyteidl_admin_agent_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_flyteidl_admin_agent_proto_rawDesc, - NumEnums: 1, - NumMessages: 27, + NumEnums: 2, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, diff --git a/flyteidl/gen/pb-go/flyteidl/admin/common.pb.go b/flyteidl/gen/pb-go/flyteidl/admin/common.pb.go index a20233700b..94889d196c 100644 --- a/flyteidl/gen/pb-go/flyteidl/admin/common.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/admin/common.pb.go @@ -1907,25 +1907,26 @@ var file_flyteidl_admin_common_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x65, 0x63, 0x6b, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x63, 0x6b, 0x2a, 0x5c, 0x0a, 0x10, 0x4e, 0x61, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x63, 0x6b, 0x2a, 0x68, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x41, 0x4d, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x41, 0x4d, 
0x45, 0x44, 0x5f, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x5f, 0x41, 0x52, 0x43, 0x48, 0x49, 0x56, 0x45, 0x44, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x5f, 0x47, 0x45, 0x4e, - 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x42, 0xb7, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, - 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, - 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x04, 0x08, 0x03, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x04, 0x10, 0x04, 0x42, 0xb7, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0x0b, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, + 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, 0x58, 0xaa, 0x02, 0x0e, + 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xca, 0x02, + 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xe2, + 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/flyteidl/gen/pb-go/flyteidl/admin/execution.pb.go b/flyteidl/gen/pb-go/flyteidl/admin/execution.pb.go index 1b878ceeb6..6e4a94b85e 100644 --- a/flyteidl/gen/pb-go/flyteidl/admin/execution.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/admin/execution.pb.go @@ -2142,7 +2142,7 @@ var file_flyteidl_admin_execution_proto_rawDesc = []byte{ 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x85, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 
0x70, 0x61, 0x63, 0x65, 0x22, 0x8b, 0x05, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x43, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, @@ -2176,179 +2176,179 @@ var file_flyteidl_admin_execution_proto_rawDesc = []byte{ 0x64, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x44, 0x52, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x73, - 0x22, 0x74, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, + 0x22, 0x7a, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x4e, 0x55, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x48, 0x49, 0x4c, 0x44, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x52, 0x49, - 0x47, 0x47, 0x45, 0x52, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x10, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0d, 0x6e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd6, - 0x09, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x70, 0x65, 0x63, - 0x12, 0x3a, 0x0a, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x52, 0x0a, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x35, 0x0a, 0x06, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, - 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0b, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x6c, 0x12, - 0x2e, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, - 0x3d, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, - 0x0a, 0x10, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x39, 0x0a, 0x09, 0x61, 0x75, 0x74, - 0x68, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x41, 0x75, - 0x74, 0x68, 0x52, 0x6f, 0x6c, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, - 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x4d, 0x0a, 0x12, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, - 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x10, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6c, - 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, - 0x78, 0x50, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d, 0x12, 0x58, 0x0a, 0x16, - 0x72, 0x61, 0x77, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x61, - 0x77, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x13, 0x72, 0x61, 0x77, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x12, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x14, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x73, 0x73, 0x69, 0x67, - 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x11, 0x63, 
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x73, - 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x76, - 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x16, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x65, 0x6e, 0x76, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x45, 0x6e, 0x76, 0x73, 0x52, 0x04, 0x65, 0x6e, 0x76, 0x73, 0x12, 0x16, 0x0a, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x18, 0x20, 0x03, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x5d, 0x0a, 0x17, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x15, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x12, 0x61, 0x0a, 0x19, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x45, 0x6e, 0x76, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x17, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x76, 0x41, 0x73, 0x73, 0x69, - 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x42, 0x18, 0x0a, 0x16, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, - 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x6d, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x14, 0x0a, 0x05, 0x63, 0x61, 0x75, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x63, 0x61, 0x75, 0x73, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5d, 0x0a, 0x1f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 
0x44, 0x61, 0x74, 0x61, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, - 0x02, 0x69, 0x64, 0x22, 0x88, 0x02, 0x0a, 0x20, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x55, 0x72, 0x6c, 0x42, 0x6c, - 0x6f, 0x62, 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, - 0x33, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x55, 0x72, 0x6c, 0x42, 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0b, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, - 0x6c, 0x4d, 0x61, 0x70, 0x52, 0x0a, 0x66, 0x75, 0x6c, 0x6c, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x12, 0x3c, 0x0a, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, - 0x70, 0x52, 0x0b, 0x66, 0x75, 0x6c, 0x6c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x22, 0x8a, - 0x01, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x34, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xae, 0x01, 0x0a, 0x1b, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x34, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, - 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1c, - 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x22, 0x19, 0x0a, 0x17, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x22, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, + 0x47, 0x47, 0x45, 0x52, 0x10, 0x06, 0x22, 0x04, 0x08, 0x07, 0x10, 0x07, 0x22, 0x56, 0x0a, 0x10, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x42, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd6, 0x09, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, + 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, + 0x61, 0x6e, 0x12, 0x35, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, + 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x41, 0x6c, 0x6c, 0x12, 0x2e, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x61, 0x64, 0x6d, 
0x69, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0f, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, + 0x39, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x6f, 0x6c, 0x65, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x4d, 0x0a, 0x12, 0x71, 0x75, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x10, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x78, + 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, + 0x73, 0x6d, 0x12, 0x58, 0x0a, 0x16, 0x72, 0x61, 0x77, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x61, 0x77, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x72, 0x61, 0x77, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x12, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x11, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x40, + 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, + 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x76, 0x65, 0x72, 
0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6f, 0x76, 0x65, 0x72, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x65, 0x6e, 0x76, + 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6e, 0x76, 0x73, 0x52, 0x04, 0x65, + 0x6e, 0x76, 0x73, 0x12, 0x16, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x18, 0x20, 0x03, 0x28, + 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x5d, 0x0a, 0x17, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x52, 0x15, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x61, 0x0a, 0x19, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x5f, 0x61, 0x73, 0x73, 0x69, + 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x76, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x17, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x6e, 0x76, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x42, 0x18, 0x0a, + 0x16, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x6d, 0x0a, + 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x61, 0x75, 0x73, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x61, 0x75, 0x73, 0x65, 0x22, 0x1c, 0x0a, 0x1a, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5d, 0x0a, 0x1f, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, + 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x64, 0x65, 0x70, 
0x74, 0x68, 0x22, - 0x4e, 0x0a, 0x23, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x2a, - 0x3e, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, - 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x52, 0x43, 0x48, 0x49, 0x56, 0x45, 0x44, 0x10, 0x01, 0x42, - 0xba, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, - 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xca, 0x02, 0x0e, 0x46, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xe2, 0x02, 0x1a, - 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x47, - 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x22, 0x88, 0x02, 0x0a, 0x20, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, + 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, + 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x55, 0x72, 0x6c, 0x42, 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x55, 0x72, 0x6c, 0x42, 0x6c, 0x6f, 0x62, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0b, 0x66, 0x75, + 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x52, 0x0a, 0x66, 0x75, 0x6c, 0x6c, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x19, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x52, 0x0b, 0x66, 0x75, 0x6c, 0x6c, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x73, 0x22, 0x8a, 0x01, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x34, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x22, 0xae, 0x01, 0x0a, 0x1b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x12, 0x34, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, + 0x61, 0x6c, 0x22, 0x19, 0x0a, 0x17, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, + 0x22, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x64, 0x65, 0x70, 0x74, 0x68, 0x22, 0x4e, 0x0a, 0x23, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x04, + 0x73, 0x70, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, + 0x04, 
0x73, 0x70, 0x61, 0x6e, 0x2a, 0x3e, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x00, 0x12, 0x16, 0x0a, + 0x12, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x52, 0x43, 0x48, 0x49, + 0x56, 0x45, 0x44, 0x10, 0x01, 0x42, 0xba, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0x0e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, + 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/flyteidl/gen/pb-go/flyteidl/admin/node_execution.pb.go b/flyteidl/gen/pb-go/flyteidl/admin/node_execution.pb.go index b4d9cb8c89..7f38db6da5 100644 --- a/flyteidl/gen/pb-go/flyteidl/admin/node_execution.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/admin/node_execution.pb.go @@ -368,6 +368,8 @@ type NodeExecutionMetaData struct { // Boolean flag indicating if the node is an array node. This is intended to uniquely identify // array nodes from other nodes which can have is_parent_node as true. IsArray bool `protobuf:"varint,5,opt,name=is_array,json=isArray,proto3" json:"is_array,omitempty"` + // Whether this node is an eager node. + IsEager bool `protobuf:"varint,6,opt,name=is_eager,json=isEager,proto3" json:"is_eager,omitempty"` } func (x *NodeExecutionMetaData) Reset() { @@ -437,6 +439,13 @@ func (x *NodeExecutionMetaData) GetIsArray() bool { return false } +func (x *NodeExecutionMetaData) GetIsEager() bool { + if x != nil { + return x.IsEager + } + return false +} + // Request structure to retrieve a list of node execution entities. 
// See :ref:`ref_flyteidl.admin.NodeExecution` for more details type NodeExecutionList struct { @@ -1218,7 +1227,7 @@ var file_flyteidl_admin_node_execution_proto_rawDesc = []byte{ 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x22, 0xba, 0x01, 0x0a, 0x15, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x61, 0x22, 0xd5, 0x01, 0x0a, 0x15, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x24, 0x0a, 0x0e, @@ -1229,150 +1238,152 @@ var file_flyteidl_admin_node_execution_proto_rawDesc = []byte{ 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x22, 0x71, - 0x0a, 0x11, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x0f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4e, 0x6f, - 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6e, 0x6f, 0x64, - 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0xf6, 0x05, 0x0a, 0x14, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x6f, 0x73, 0x75, 0x72, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, - 0x18, 0x01, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, - 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x40, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, - 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 
0x52, 0x05, 0x70, 0x68, 0x61, - 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x35, 0x0a, - 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, - 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x19, + 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x65, 0x61, 0x67, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x69, 0x73, 0x45, 0x61, 0x67, 0x65, 0x72, 0x22, 0x71, 0x0a, 0x11, 0x4e, 0x6f, 0x64, + 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x46, + 0x0a, 0x0f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf6, 0x05, 0x0a, + 0x14, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, + 0x6f, 0x73, 0x75, 0x72, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x40, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, + 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 
0x6e, + 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5c, 0x0a, 0x16, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x48, 0x01, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x10, 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x6f, - 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x65, - 0x63, 0x6b, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, - 0x63, 0x6b, 0x55, 0x72, 0x69, 0x12, 0x2f, 0x0a, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4a, 0x6f, 0x62, 0x53, - 0x70, 0x65, 0x63, 0x55, 0x72, 0x69, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x64, 0x0a, 0x14, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, - 0x22, 0xc0, 0x01, 
0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, - 0x6c, 0x6f, 0x67, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, - 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, 0x0a, 0x0b, 0x63, - 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x0a, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x55, 0x72, 0x69, 0x22, 0xce, 0x01, 0x0a, 0x1b, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, + 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5c, 0x0a, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, + 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x53, - 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, - 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6c, 0x6f, 0x73, 0x75, 0x72, - 0x65, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x12, 0x2f, 0x0a, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6a, - 0x6f, 0x62, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, - 0x63, 0x55, 0x72, 0x69, 0x22, 0x55, 0x0a, 0x1b, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x02, 
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x22, 0x96, 0x03, 0x0a, 0x1c, - 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, - 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x55, 0x72, - 0x6c, 0x42, 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x73, 0x12, 0x35, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x55, 0x72, 0x6c, 0x42, 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0b, 0x66, 0x75, 0x6c, 0x6c, - 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, - 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x52, 0x0a, 0x66, 0x75, 0x6c, 0x6c, 0x49, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, - 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x52, 0x0b, 0x66, 0x75, 0x6c, 0x6c, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x73, 0x12, 0x56, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x38, 0x0a, 0x0a, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x46, 0x6c, 0x79, 0x74, 0x65, 0x55, 0x52, 0x4c, 0x73, 0x52, 0x09, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x55, 0x72, 0x6c, 0x73, 0x22, 0x57, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x4e, 0x6f, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x22, 0x72, 0x0a, - 0x1b, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4e, 0x6f, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x53, 0x0a, 0x11, - 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6c, 0x6f, 0x73, 0x75, 0x72, 0x65, 0x52, - 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x42, 0xbe, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0x12, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, - 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x48, 0x01, 0x52, 0x10, 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x72, + 0x69, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x63, 0x6b, 0x55, 0x72, 0x69, + 0x12, 0x2f, 0x0a, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, + 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x55, 0x72, + 0x69, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x64, 0x0a, 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, + 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, + 0x65, 0x78, 0x65, 0x63, 0x75, 
0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xc0, 0x01, 0x0a, 0x10, + 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x44, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, 0x0a, 0x0b, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0a, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x69, 0x22, 0xce, + 0x01, 0x0a, 0x1b, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x29, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x53, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6c, 0x6f, 0x73, 0x75, 0x72, 0x65, 0x52, 0x10, 0x63, 0x6f, + 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2f, + 0x0a, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x55, 0x72, 0x69, 0x22, + 0x55, 0x0a, 0x1b, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x22, 0x96, 0x03, 0x0a, 0x1c, 0x4e, 0x6f, 0x64, 0x65, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x55, 0x72, 0x6c, 0x42, 0x6c, 0x6f, 0x62, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x69, 0x6e, 0x70, 
0x75, 0x74, 0x73, 0x12, 0x35, 0x0a, 0x07, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x55, + 0x72, 0x6c, 0x42, 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0b, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, + 0x4d, 0x61, 0x70, 0x52, 0x0a, 0x66, 0x75, 0x6c, 0x6c, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, + 0x3c, 0x0a, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, + 0x52, 0x0b, 0x66, 0x75, 0x6c, 0x6c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x56, 0x0a, + 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x38, 0x0a, 0x0a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x5f, 0x75, + 0x72, 0x6c, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x55, 0x52, 0x4c, 0x73, 0x52, 0x09, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x22, + 0x57, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4e, 0x6f, 0x64, + 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x36, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, + 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x22, 0x72, 0x0a, 0x1b, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x4e, 0x6f, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x43, 0x6c, 0x6f, 0x73, 0x75, 0x72, 0x65, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0xbe, 0x01, 0x0a, + 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x42, 0x12, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 
0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, 0x58, 0xaa, 0x02, 0x0e, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xca, 0x02, 0x0e, + 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xe2, 0x02, + 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/flyteidl/gen/pb-go/flyteidl/core/tasks.pb.go b/flyteidl/gen/pb-go/flyteidl/core/tasks.pb.go index 122ddce559..ceb9c71f1f 100644 --- a/flyteidl/gen/pb-go/flyteidl/core/tasks.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/core/tasks.pb.go @@ -714,6 +714,9 @@ type TaskMetadata struct { PodTemplateName string `protobuf:"bytes,12,opt,name=pod_template_name,json=podTemplateName,proto3" json:"pod_template_name,omitempty"` // cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache. CacheIgnoreInputVars []string `protobuf:"bytes,13,rep,name=cache_ignore_input_vars,json=cacheIgnoreInputVars,proto3" json:"cache_ignore_input_vars,omitempty"` + // is_eager indicates whether the task is eager or not. + // This would be used by CreateTask endpoint. + IsEager bool `protobuf:"varint,14,opt,name=is_eager,json=isEager,proto3" json:"is_eager,omitempty"` } func (x *TaskMetadata) Reset() { @@ -839,6 +842,13 @@ func (x *TaskMetadata) GetCacheIgnoreInputVars() []string { return nil } +func (x *TaskMetadata) GetIsEager() bool { + if x != nil { + return x.IsEager + } + return false +} + type isTaskMetadata_InterruptibleValue interface { isTaskMetadata_InterruptibleValue() } @@ -1692,7 +1702,7 @@ var file_flyteidl_core_tasks_proto_rawDesc = []byte{ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6c, 0x61, 0x76, 0x6f, 0x72, 0x22, 0x27, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4c, 0x59, 0x54, 0x45, - 0x5f, 0x53, 0x44, 0x4b, 0x10, 0x01, 0x22, 0xac, 0x05, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x4d, + 0x5f, 0x53, 0x44, 0x4b, 0x10, 0x01, 0x22, 0xc7, 0x05, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x72, @@ -1730,183 +1740,184 @@ var file_flyteidl_core_tasks_proto_rawDesc = []byte{ 0x0a, 0x17, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x61, 0x63, 0x68, 0x65, 0x49, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x70, 0x75, - 0x74, 0x56, 0x61, 0x72, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, - 0x0a, 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd6, 0x05, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, - 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, - 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x38, 0x0a, 0x09, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, - 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x48, 0x00, - 0x52, 0x06, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x12, 0x26, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x71, 0x6c, 0x48, 0x00, 0x52, 0x03, 0x73, 0x71, 0x6c, - 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x61, 0x73, - 0x6b, 0x54, 0x79, 0x70, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x10, - 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x4f, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x65, 0x6e, - 
0x64, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x36, - 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x12, - 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xfc, 0x03, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x36, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x12, 0x2d, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x65, - 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, - 0x37, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x05, 0x70, 0x6f, 0x72, 0x74, - 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0b, - 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 
0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x49, 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x41, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0c, 0x61, 0x72, - 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x22, 0x49, 0x0a, 0x0c, 0x41, 0x72, - 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4d, 0x44, 0x36, 0x34, - 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x4d, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, - 0x06, 0x41, 0x52, 0x4d, 0x5f, 0x56, 0x36, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x52, 0x4d, - 0x5f, 0x56, 0x37, 0x10, 0x04, 0x22, 0xb5, 0x02, 0x0a, 0x0a, 0x49, 0x4f, 0x53, 0x74, 0x72, 0x61, - 0x74, 0x65, 0x67, 0x79, 0x12, 0x4b, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x4f, 0x53, 0x74, - 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4d, - 0x6f, 0x64, 0x65, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x74, 0x56, 0x61, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x65, 0x61, 0x67, 0x65, + 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x45, 0x61, 0x67, 0x65, 0x72, + 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0xd6, 0x05, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x04, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x38, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x12, 0x30, 0x0a, 0x07, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x06, 0x6b, 0x38, 0x73, + 0x50, 0x6f, 0x64, 0x12, 0x26, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x53, 0x71, 0x6c, 0x48, 0x00, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x4f, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x10, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x36, 0x0a, 
0x0d, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, + 0x74, 0x22, 0xfc, 0x03, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, + 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x12, 0x36, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x03, 0x65, + 0x6e, 0x76, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x50, 0x61, 0x69, 0x72, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, + 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, + 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, + 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x72, + 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x72, 0x63, 0x68, 0x69, + 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, + 0x63, 0x74, 0x75, 0x72, 0x65, 0x22, 0x49, 0x0a, 0x0c, 0x41, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, + 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4d, 0x44, 0x36, 0x34, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x52, 0x4d, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x52, 0x4d, 0x5f, + 0x56, 0x36, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x52, 0x4d, 0x5f, 0x56, 0x37, 0x10, 0x04, + 0x22, 0xb5, 0x02, 0x0a, 0x0a, 0x49, 0x4f, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, + 
0x4b, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x4f, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, - 0x79, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x4c, 0x0a, 0x0c, 0x44, 0x6f, 0x77, 0x6e, - 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x4f, 0x57, 0x4e, - 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x45, 0x41, 0x47, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x44, 0x4f, 0x57, 0x4e, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10, - 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x44, 0x4f, 0x57, 0x4e, - 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x02, 0x22, 0x45, 0x0a, 0x0a, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4f, - 0x4e, 0x5f, 0x45, 0x58, 0x49, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x50, 0x4c, 0x4f, - 0x41, 0x44, 0x5f, 0x45, 0x41, 0x47, 0x45, 0x52, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x4f, - 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x02, 0x22, 0xa7, 0x02, - 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x49, 0x0a, - 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x3a, 0x0a, 0x0b, 0x69, 0x6f, 0x5f, 0x73, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x4f, - 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0a, 0x69, 0x6f, 0x53, 0x74, 0x72, 0x61, - 0x74, 0x65, 0x67, 0x79, 0x22, 0x31, 0x0a, 0x10, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, - 0x61, 0x70, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x53, 0x4f, 0x4e, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x41, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x10, 0x02, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x4b, 0x38, 0x73, 0x50, - 0x6f, 0x64, 0x12, 0x3c, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x79, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0c, + 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x45, 0x0a, 0x0b, + 0x75, 0x70, 0x6c, 0x6f, 
0x61, 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x49, 0x4f, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x55, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4d, + 0x6f, 0x64, 0x65, 0x22, 0x4c, 0x0a, 0x0c, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4d, + 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x4f, 0x57, 0x4e, 0x4c, 0x4f, 0x41, 0x44, 0x5f, + 0x45, 0x41, 0x47, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x4f, 0x57, 0x4e, 0x4c, + 0x4f, 0x41, 0x44, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, + 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x4c, 0x4f, 0x41, 0x44, 0x10, + 0x02, 0x22, 0x45, 0x0a, 0x0a, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x12, 0x0a, 0x0e, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x5f, 0x45, 0x58, 0x49, + 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x45, 0x41, + 0x47, 0x45, 0x52, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, + 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x02, 0x22, 0xa7, 0x02, 0x0a, 0x11, 0x44, 0x61, 0x74, + 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x49, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, + 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x3a, 0x0a, 0x0b, 0x69, 0x6f, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x4f, 0x53, 0x74, 0x72, 0x61, 0x74, + 0x65, 0x67, 0x79, 0x52, 0x0a, 0x69, 0x6f, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, + 0x31, 0x0a, 0x10, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, + 0x04, 0x59, 0x41, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x10, 0x02, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x12, 0x3c, 0x0a, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x4b, 0x38, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 
0x61, 0x74, 0x61, 0x12, 0x32, 0x0a, 0x08, 0x70, + 0x6f, 0x64, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x53, 0x70, 0x65, 0x63, 0x12, + 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x32, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x70, 0x6f, 0x64, - 0x53, 0x70, 0x65, 0x63, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x6f, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x64, 0x61, 0x74, - 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x4b, 0x38, 0x73, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, - 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, - 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x4b, 0x38, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 
0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x92, 0x01, 0x0a, 0x03, 0x53, 0x71, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x07, 0x64, 0x69, 0x61, - 0x6c, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x71, 0x6c, 0x2e, 0x44, - 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x52, 0x07, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x22, - 0x37, 0x0a, 0x07, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, - 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4e, 0x53, - 0x49, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x09, 0x0a, - 0x05, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x03, 0x42, 0xb0, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0a, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, - 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, - 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, - 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, - 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, - 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x46, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x63, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x53, + 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, + 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x92, + 0x01, 0x0a, 0x03, 0x53, 0x71, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x07, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x71, 0x6c, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x22, 0x37, 0x0a, 0x07, 0x44, 0x69, + 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4e, 0x53, 0x49, 0x10, 0x01, 0x12, 0x08, + 0x0a, 0x04, 0x48, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, 0x45, + 0x52, 0x10, 0x03, 0x42, 0xb0, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/flyteidl/gen/pb-go/flyteidl/core/workflow.pb.go b/flyteidl/gen/pb-go/flyteidl/core/workflow.pb.go index 14ac613ea6..d484bd7ae9 100644 --- a/flyteidl/gen/pb-go/flyteidl/core/workflow.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/core/workflow.pb.go @@ -10,7 +10,7 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" - _ "google.golang.org/protobuf/types/known/wrapperspb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -785,6 +785,8 @@ type ArrayNode struct { SuccessCriteria isArrayNode_SuccessCriteria `protobuf_oneof:"success_criteria"` // execution_mode determines the execution path for ArrayNode. 
ExecutionMode ArrayNode_ExecutionMode `protobuf:"varint,5,opt,name=execution_mode,json=executionMode,proto3,enum=flyteidl.core.ArrayNode_ExecutionMode" json:"execution_mode,omitempty"` + // Indicates whether the sub node's original interface was altered + IsOriginalSubNodeInterface *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=is_original_sub_node_interface,json=isOriginalSubNodeInterface,proto3" json:"is_original_sub_node_interface,omitempty"` } func (x *ArrayNode) Reset() { @@ -868,6 +870,13 @@ func (x *ArrayNode) GetExecutionMode() ArrayNode_ExecutionMode { return ArrayNode_MINIMAL_STATE } +func (x *ArrayNode) GetIsOriginalSubNodeInterface() *wrapperspb.BoolValue { + if x != nil { + return x.IsOriginalSubNodeInterface + } + return nil +} + type isArrayNode_ParallelismOption interface { isArrayNode_ParallelismOption() } @@ -1801,7 +1810,7 @@ var file_flyteidl_core_workflow_proto_rawDesc = []byte{ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x42, 0x0b, - 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xda, 0x02, 0x0a, 0x09, + 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xba, 0x03, 0x0a, 0x09, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x27, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, @@ -1817,7 +1826,13 @@ var file_flyteidl_core_workflow_proto_rawDesc = []byte{ 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0d, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x32, 0x0a, 0x0d, 0x45, 0x78, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x5e, 0x0a, 0x1e, 0x69, 0x73, + 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, + 0x69, 0x73, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x75, 0x62, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x22, 0x32, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x49, 0x4e, 0x49, 0x4d, 0x41, 0x4c, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x55, 0x4c, 0x4c, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0x01, 0x42, 0x14, @@ -2020,13 +2035,14 @@ var file_flyteidl_core_workflow_proto_goTypes = []interface{}{ (*Identifier)(nil), // 23: flyteidl.core.Identifier (*LiteralType)(nil), // 24: flyteidl.core.LiteralType (*durationpb.Duration)(nil), // 25: google.protobuf.Duration - (*RetryStrategy)(nil), // 26: flyteidl.core.RetryStrategy - (*Binding)(nil), // 27: flyteidl.core.Binding - (*QualityOfService)(nil), // 28: flyteidl.core.QualityOfService 
- (*TypedInterface)(nil), // 29: flyteidl.core.TypedInterface - (*Resources)(nil), // 30: flyteidl.core.Resources - (*ExtendedResources)(nil), // 31: flyteidl.core.ExtendedResources - (*LiteralMap)(nil), // 32: flyteidl.core.LiteralMap + (*wrapperspb.BoolValue)(nil), // 26: google.protobuf.BoolValue + (*RetryStrategy)(nil), // 27: flyteidl.core.RetryStrategy + (*Binding)(nil), // 28: flyteidl.core.Binding + (*QualityOfService)(nil), // 29: flyteidl.core.QualityOfService + (*TypedInterface)(nil), // 30: flyteidl.core.TypedInterface + (*Resources)(nil), // 31: flyteidl.core.Resources + (*ExtendedResources)(nil), // 32: flyteidl.core.ExtendedResources + (*LiteralMap)(nil), // 33: flyteidl.core.LiteralMap } var file_flyteidl_core_workflow_proto_depIdxs = []int32{ 21, // 0: flyteidl.core.IfBlock.condition:type_name -> flyteidl.core.BooleanExpression @@ -2047,36 +2063,37 @@ var file_flyteidl_core_workflow_proto_depIdxs = []int32{ 9, // 15: flyteidl.core.GateNode.sleep:type_name -> flyteidl.core.SleepCondition 14, // 16: flyteidl.core.ArrayNode.node:type_name -> flyteidl.core.Node 0, // 17: flyteidl.core.ArrayNode.execution_mode:type_name -> flyteidl.core.ArrayNode.ExecutionMode - 25, // 18: flyteidl.core.NodeMetadata.timeout:type_name -> google.protobuf.Duration - 26, // 19: flyteidl.core.NodeMetadata.retries:type_name -> flyteidl.core.RetryStrategy - 12, // 20: flyteidl.core.Node.metadata:type_name -> flyteidl.core.NodeMetadata - 27, // 21: flyteidl.core.Node.inputs:type_name -> flyteidl.core.Binding - 13, // 22: flyteidl.core.Node.output_aliases:type_name -> flyteidl.core.Alias - 5, // 23: flyteidl.core.Node.task_node:type_name -> flyteidl.core.TaskNode - 6, // 24: flyteidl.core.Node.workflow_node:type_name -> flyteidl.core.WorkflowNode - 4, // 25: flyteidl.core.Node.branch_node:type_name -> flyteidl.core.BranchNode - 10, // 26: flyteidl.core.Node.gate_node:type_name -> flyteidl.core.GateNode - 11, // 27: flyteidl.core.Node.array_node:type_name -> flyteidl.core.ArrayNode - 28, // 28: flyteidl.core.WorkflowMetadata.quality_of_service:type_name -> flyteidl.core.QualityOfService - 1, // 29: flyteidl.core.WorkflowMetadata.on_failure:type_name -> flyteidl.core.WorkflowMetadata.OnFailurePolicy - 20, // 30: flyteidl.core.WorkflowMetadata.tags:type_name -> flyteidl.core.WorkflowMetadata.TagsEntry - 23, // 31: flyteidl.core.WorkflowTemplate.id:type_name -> flyteidl.core.Identifier - 15, // 32: flyteidl.core.WorkflowTemplate.metadata:type_name -> flyteidl.core.WorkflowMetadata - 29, // 33: flyteidl.core.WorkflowTemplate.interface:type_name -> flyteidl.core.TypedInterface - 14, // 34: flyteidl.core.WorkflowTemplate.nodes:type_name -> flyteidl.core.Node - 27, // 35: flyteidl.core.WorkflowTemplate.outputs:type_name -> flyteidl.core.Binding - 14, // 36: flyteidl.core.WorkflowTemplate.failure_node:type_name -> flyteidl.core.Node - 16, // 37: flyteidl.core.WorkflowTemplate.metadata_defaults:type_name -> flyteidl.core.WorkflowMetadataDefaults - 30, // 38: flyteidl.core.TaskNodeOverrides.resources:type_name -> flyteidl.core.Resources - 31, // 39: flyteidl.core.TaskNodeOverrides.extended_resources:type_name -> flyteidl.core.ExtendedResources - 23, // 40: flyteidl.core.LaunchPlanTemplate.id:type_name -> flyteidl.core.Identifier - 29, // 41: flyteidl.core.LaunchPlanTemplate.interface:type_name -> flyteidl.core.TypedInterface - 32, // 42: flyteidl.core.LaunchPlanTemplate.fixed_inputs:type_name -> flyteidl.core.LiteralMap - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for 
method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name + 26, // 18: flyteidl.core.ArrayNode.is_original_sub_node_interface:type_name -> google.protobuf.BoolValue + 25, // 19: flyteidl.core.NodeMetadata.timeout:type_name -> google.protobuf.Duration + 27, // 20: flyteidl.core.NodeMetadata.retries:type_name -> flyteidl.core.RetryStrategy + 12, // 21: flyteidl.core.Node.metadata:type_name -> flyteidl.core.NodeMetadata + 28, // 22: flyteidl.core.Node.inputs:type_name -> flyteidl.core.Binding + 13, // 23: flyteidl.core.Node.output_aliases:type_name -> flyteidl.core.Alias + 5, // 24: flyteidl.core.Node.task_node:type_name -> flyteidl.core.TaskNode + 6, // 25: flyteidl.core.Node.workflow_node:type_name -> flyteidl.core.WorkflowNode + 4, // 26: flyteidl.core.Node.branch_node:type_name -> flyteidl.core.BranchNode + 10, // 27: flyteidl.core.Node.gate_node:type_name -> flyteidl.core.GateNode + 11, // 28: flyteidl.core.Node.array_node:type_name -> flyteidl.core.ArrayNode + 29, // 29: flyteidl.core.WorkflowMetadata.quality_of_service:type_name -> flyteidl.core.QualityOfService + 1, // 30: flyteidl.core.WorkflowMetadata.on_failure:type_name -> flyteidl.core.WorkflowMetadata.OnFailurePolicy + 20, // 31: flyteidl.core.WorkflowMetadata.tags:type_name -> flyteidl.core.WorkflowMetadata.TagsEntry + 23, // 32: flyteidl.core.WorkflowTemplate.id:type_name -> flyteidl.core.Identifier + 15, // 33: flyteidl.core.WorkflowTemplate.metadata:type_name -> flyteidl.core.WorkflowMetadata + 30, // 34: flyteidl.core.WorkflowTemplate.interface:type_name -> flyteidl.core.TypedInterface + 14, // 35: flyteidl.core.WorkflowTemplate.nodes:type_name -> flyteidl.core.Node + 28, // 36: flyteidl.core.WorkflowTemplate.outputs:type_name -> flyteidl.core.Binding + 14, // 37: flyteidl.core.WorkflowTemplate.failure_node:type_name -> flyteidl.core.Node + 16, // 38: flyteidl.core.WorkflowTemplate.metadata_defaults:type_name -> flyteidl.core.WorkflowMetadataDefaults + 31, // 39: flyteidl.core.TaskNodeOverrides.resources:type_name -> flyteidl.core.Resources + 32, // 40: flyteidl.core.TaskNodeOverrides.extended_resources:type_name -> flyteidl.core.ExtendedResources + 23, // 41: flyteidl.core.LaunchPlanTemplate.id:type_name -> flyteidl.core.Identifier + 30, // 42: flyteidl.core.LaunchPlanTemplate.interface:type_name -> flyteidl.core.TypedInterface + 33, // 43: flyteidl.core.LaunchPlanTemplate.fixed_inputs:type_name -> flyteidl.core.LiteralMap + 44, // [44:44] is the sub-list for method output_type + 44, // [44:44] is the sub-list for method input_type + 44, // [44:44] is the sub-list for extension type_name + 44, // [44:44] is the sub-list for extension extendee + 0, // [0:44] is the sub-list for field type_name } func init() { file_flyteidl_core_workflow_proto_init() } diff --git a/flyteidl/gen/pb-go/flyteidl/event/event.pb.go b/flyteidl/gen/pb-go/flyteidl/event/event.pb.go index 963ed02ff6..554cd70c13 100644 --- a/flyteidl/gen/pb-go/flyteidl/event/event.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/event/event.pb.go @@ -276,6 +276,8 @@ type NodeExecutionEvent struct { // if the relevant execution entity is was registered, or dynamic. This field indicates that the target_entity ID, // as well as task IDs in any corresponding Task Executions, should not be used to looked up the task in Admin's db. 
IsInDynamicChain bool `protobuf:"varint,24,opt,name=is_in_dynamic_chain,json=isInDynamicChain,proto3" json:"is_in_dynamic_chain,omitempty"` + // Whether this node launched an eager task. + IsEager bool `protobuf:"varint,25,opt,name=is_eager,json=isEager,proto3" json:"is_eager,omitempty"` } func (x *NodeExecutionEvent) Reset() { @@ -499,6 +501,13 @@ func (x *NodeExecutionEvent) GetIsInDynamicChain() bool { return false } +func (x *NodeExecutionEvent) GetIsEager() bool { + if x != nil { + return x.IsEager + } + return false +} + type isNodeExecutionEvent_InputValue interface { isNodeExecutionEvent_InputValue() } @@ -1497,7 +1506,7 @@ var file_flyteidl_event_event_proto_rawDesc = []byte{ 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x22, 0x99, 0x0a, 0x0a, 0x12, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x22, 0xb4, 0x0a, 0x0a, 0x12, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, @@ -1575,196 +1584,197 @@ var file_flyteidl_event_event_proto_rawDesc = []byte{ 0x72, 0x67, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x49, 0x6e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x64, 0x22, 0xf1, 0x02, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, - 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, + 0x61, 0x6d, 0x69, 0x63, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, + 0x65, 0x61, 0x67, 0x65, 0x72, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x45, + 0x61, 0x67, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x42, 0x11, 0x0a, 0x0f, 
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x4d, 0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xf1, + 0x02, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, 0x0a, 0x0b, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, - 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x0b, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, - 0x0a, 0x0b, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x52, 0x0a, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4b, 0x65, 0x79, 0x12, - 0x57, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, - 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x11, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x69, 0x12, - 0x56, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0xce, 0x01, 0x0a, 0x1b, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 
0x6f, 0x64, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x53, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6c, - 0x6f, 0x73, 0x75, 0x72, 0x65, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2f, 0x0a, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x75, 0x72, 0x69, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4a, 0x6f, - 0x62, 0x53, 0x70, 0x65, 0x63, 0x55, 0x72, 0x69, 0x22, 0x55, 0x0a, 0x1b, 0x50, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x36, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0a, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x57, 0x0a, 0x12, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x11, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x69, 0x12, 0x56, 0x0a, 0x10, 0x64, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x22, 0xce, 0x01, 0x0a, 0x1b, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x53, 0x0a, + 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 
0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6c, 0x6f, 0x73, 0x75, 0x72, 0x65, + 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x2f, 0x0a, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6a, 0x6f, + 0x62, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, + 0x55, 0x72, 0x69, 0x22, 0x55, 0x0a, 0x1b, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, + 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x36, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, + 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x22, 0x36, 0x0a, 0x1b, 0x50, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x22, 0x62, 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, 0x22, 0x97, 0x08, 0x0a, 0x12, 0x54, 0x61, 0x73, 0x6b, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x32, 0x0a, + 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, + 0x64, 0x12, 0x5f, 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x22, - 0x36, 0x0a, 0x1b, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x17, - 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0x62, 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3b, - 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, + 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x15, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, + 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x3b, + 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, 0x22, 0x97, 0x08, 0x0a, 0x12, - 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x08, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x3a, 0x0a, 0x0a, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x01, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3c, + 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 
0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x06, - 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x5f, 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x52, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x12, 0x38, 0x0a, 0x05, - 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, - 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x6f, - 0x64, 0x75, 0x63, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, - 0x6f, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, - 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x01, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x0b, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x61, 0x73, 0x65, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x70, + 0x68, 0x61, 0x73, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, 
0x74, 0x2e, 0x54, + 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, - 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, - 0x3a, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x13, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0a, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x01, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x35, 0x0a, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x01, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x12, 0x3c, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, - 0x4d, 0x61, 0x70, 0x48, 0x01, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, - 0x61, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x70, - 0x68, 0x61, 0x73, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0c, 0x70, 0x68, 0x61, 0x73, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x1a, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x07, - 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x07, 0x72, 0x65, 0x61, 0x73, - 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, - 0x12, 
0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x41, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x9e, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, - 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, - 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x61, - 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x70, 0x68, - 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, - 0x68, 0x61, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, - 0x6f, 0x67, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x63, - 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x6c, 0x6f, - 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, - 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x22, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x6c, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x22, 0x9d, 
0x03, 0x0a, 0x15, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, - 0x0e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4e, 0x0a, 0x12, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, - 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x0e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x52, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x22, 0x2f, 0x0a, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, - 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x52, 0x55, 0x50, 0x54, 0x49, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x42, 0xb6, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, - 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0xa2, 0x02, 0x03, 0x46, 0x45, 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xca, 0x02, 0x0e, 0x46, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xe2, 0x02, 0x1a, - 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x5c, 0x47, - 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 
0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x42, 0x0d, 0x0a, 0x0b, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0x9e, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, + 0x74, 0x65, 0x6d, 0x70, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, + 0x44, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, + 0x73, 0x22, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6f, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x9d, + 0x03, 0x0a, 0x15, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x53, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x4e, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6f, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x5a, 0x0a, 0x0e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x0d, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0x2f, 0x0a, + 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x0b, + 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x49, + 0x4e, 0x54, 0x45, 0x52, 0x52, 0x55, 0x50, 0x54, 0x49, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x42, 0xb6, + 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, + 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0xa2, 0x02, 0x03, 0x46, 0x45, 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x3a, 0x3a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/flyteidl/gen/pb-go/flyteidl/plugins/ray.pb.go b/flyteidl/gen/pb-go/flyteidl/plugins/ray.pb.go index d0f072db51..18d0c4c1cb 100644 --- a/flyteidl/gen/pb-go/flyteidl/plugins/ray.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/plugins/ray.pb.go @@ -7,6 +7,7 @@ package plugins import ( + core "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -186,6 +187,8 @@ type HeadGroupSpec struct { // Optional. 
RayStartParams are the params of the start command: address, object-store-memory. // Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start RayStartParams map[string]string `protobuf:"bytes,1,rep,name=ray_start_params,json=rayStartParams,proto3" json:"ray_start_params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Pod Spec for the ray head pod + K8SPod *core.K8SPod `protobuf:"bytes,2,opt,name=k8s_pod,json=k8sPod,proto3" json:"k8s_pod,omitempty"` } func (x *HeadGroupSpec) Reset() { @@ -227,6 +230,13 @@ func (x *HeadGroupSpec) GetRayStartParams() map[string]string { return nil } +func (x *HeadGroupSpec) GetK8SPod() *core.K8SPod { + if x != nil { + return x.K8SPod + } + return nil +} + // WorkerGroupSpec are the specs for the worker pods type WorkerGroupSpec struct { state protoimpl.MessageState @@ -244,6 +254,8 @@ type WorkerGroupSpec struct { // Optional. RayStartParams are the params of the start command: address, object-store-memory. // Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start RayStartParams map[string]string `protobuf:"bytes,5,rep,name=ray_start_params,json=rayStartParams,proto3" json:"ray_start_params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Pod Spec for ray worker pods + K8SPod *core.K8SPod `protobuf:"bytes,6,opt,name=k8s_pod,json=k8sPod,proto3" json:"k8s_pod,omitempty"` } func (x *WorkerGroupSpec) Reset() { @@ -313,87 +325,101 @@ func (x *WorkerGroupSpec) GetRayStartParams() map[string]string { return nil } +func (x *WorkerGroupSpec) GetK8SPod() *core.K8SPod { + if x != nil { + return x.K8SPod + } + return nil +} + var File_flyteidl_plugins_ray_proto protoreflect.FileDescriptor var file_flyteidl_plugins_ray_proto_rawDesc = []byte{ 0x0a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x72, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0x92, - 0x02, 0x0a, 0x06, 0x52, 0x61, 0x79, 0x4a, 0x6f, 0x62, 0x12, 0x3d, 0x0a, 0x0b, 0x72, 0x61, 0x79, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x73, 0x2e, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x72, 0x61, - 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x6e, 0x76, 0x12, 0x3d, 0x0a, - 0x1b, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, - 0x6a, 0x6f, 0x62, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x18, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x41, 0x66, 0x74, 0x65, - 0x72, 0x4a, 0x6f, 0x62, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x1a, - 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x61, 0x66, 0x74, 0x65, - 0x72, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x17, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x41, 0x66, 0x74, 0x65, - 0x72, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 
0x64, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x5f, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x6e, 0x76, 0x59, - 0x61, 0x6d, 0x6c, 0x22, 0xd3, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0d, 0x68, 0x65, - 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4d, 0x0a, 0x11, 0x77, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x70, 0x65, 0x63, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x75, - 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0xb1, 0x01, 0x0a, 0x0d, 0x48, 0x65, - 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x5d, 0x0a, 0x10, 0x72, - 0x61, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x2e, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x61, 0x79, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x61, - 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb6, 0x02, - 0x0a, 0x0f, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, - 0x63, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, - 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 
0x70, 0x6c, 0x69, 0x63, - 0x61, 0x73, 0x12, 0x5f, 0x0a, 0x10, 0x72, 0x61, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x2e, - 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0xc0, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, - 0x08, 0x52, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, - 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, - 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, - 0xaa, 0x02, 0x10, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x73, 0xca, 0x02, 0x10, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x50, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x1a, 0x19, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, + 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x02, 0x0a, 0x06, 0x52, 0x61, + 0x79, 0x4a, 0x6f, 0x62, 0x12, 0x3d, 0x0a, 0x0b, 0x72, 0x61, 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x52, 0x61, 0x79, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x72, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x65, + 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x6e, 0x76, 0x12, 0x3d, 0x0a, 0x1b, 0x73, 0x68, 0x75, 0x74, + 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x66, + 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4a, 0x6f, 0x62, 
0x46, + 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x74, 0x6c, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, 0x74, 0x74, 0x6c, + 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x41, 0x66, 0x74, 0x65, 0x72, 0x46, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x65, 0x6e, 0x76, 0x5f, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x6e, 0x76, 0x59, 0x61, 0x6d, 0x6c, 0x22, 0xd3, + 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x47, 0x0a, + 0x0f, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4d, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x53, 0x70, 0x65, 0x63, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, + 0x6c, 0x69, 0x6e, 0x67, 0x22, 0xe1, 0x01, 0x0a, 0x0d, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x5d, 0x0a, 0x10, 0x72, 0x61, 0x79, 0x5f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, + 0x63, 0x2e, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x52, 0x06, 0x6b, + 0x38, 0x73, 0x50, 0x6f, 0x64, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x02, 0x0a, 0x0f, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1d, 0x0a, 0x0a, + 0x67, 0x72, 0x6f, 
0x75, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, + 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, + 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x5f, 0x0a, + 0x10, 0x72, 0x61, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x2e, 0x52, 0x61, 0x79, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, + 0x72, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2e, + 0x0a, 0x07, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x52, 0x06, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x1a, 0x41, + 0x0a, 0x13, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0xc0, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x08, 0x52, 0x61, 0x79, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, + 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x10, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, + 0x10, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0xe2, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -416,18 +442,21 @@ var file_flyteidl_plugins_ray_proto_goTypes = []interface{}{ (*WorkerGroupSpec)(nil), // 3: flyteidl.plugins.WorkerGroupSpec nil, // 4: flyteidl.plugins.HeadGroupSpec.RayStartParamsEntry nil, // 5: 
flyteidl.plugins.WorkerGroupSpec.RayStartParamsEntry + (*core.K8SPod)(nil), // 6: flyteidl.core.K8sPod } var file_flyteidl_plugins_ray_proto_depIdxs = []int32{ 1, // 0: flyteidl.plugins.RayJob.ray_cluster:type_name -> flyteidl.plugins.RayCluster 2, // 1: flyteidl.plugins.RayCluster.head_group_spec:type_name -> flyteidl.plugins.HeadGroupSpec 3, // 2: flyteidl.plugins.RayCluster.worker_group_spec:type_name -> flyteidl.plugins.WorkerGroupSpec 4, // 3: flyteidl.plugins.HeadGroupSpec.ray_start_params:type_name -> flyteidl.plugins.HeadGroupSpec.RayStartParamsEntry - 5, // 4: flyteidl.plugins.WorkerGroupSpec.ray_start_params:type_name -> flyteidl.plugins.WorkerGroupSpec.RayStartParamsEntry - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 6, // 4: flyteidl.plugins.HeadGroupSpec.k8s_pod:type_name -> flyteidl.core.K8sPod + 5, // 5: flyteidl.plugins.WorkerGroupSpec.ray_start_params:type_name -> flyteidl.plugins.WorkerGroupSpec.RayStartParamsEntry + 6, // 6: flyteidl.plugins.WorkerGroupSpec.k8s_pod:type_name -> flyteidl.core.K8sPod + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_flyteidl_plugins_ray_proto_init() } diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json index c4f6f3ef7f..01ae020a09 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json @@ -5705,6 +5705,10 @@ "is_array": { "type": "boolean", "description": "Boolean flag indicating if the node is an array node. This is intended to uniquely identify\narray nodes from other nodes which can have is_parent_node as true." + }, + "is_eager": { + "type": "boolean", + "description": "Whether this node is an eager node." } }, "title": "Represents additional attributes related to a Node Execution" @@ -6560,6 +6564,10 @@ "execution_mode": { "$ref": "#/definitions/coreArrayNodeExecutionMode", "description": "execution_mode determines the execution path for ArrayNode." + }, + "is_original_sub_node_interface": { + "type": "boolean", + "title": "Indicates whether the sub node's original interface was altered" } }, "description": "ArrayNode is a Flyte node type that simplifies the execution of a sub-node over a list of input\nvalues. An ArrayNode can be executed with configurable parallelism (separate from the parent\nworkflow) and can be configured to succeed when a certain number of sub-nodes succeed." @@ -8269,6 +8277,10 @@ "type": "string" }, "description": "cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache." + }, + "is_eager": { + "type": "boolean", + "description": "is_eager indicates whether the task is eager or not.\nThis would be used by CreateTask endpoint." } }, "title": "Task Metadata" @@ -8764,6 +8776,10 @@ "is_in_dynamic_chain": { "type": "boolean", "description": "Tasks and subworkflows (but not launch plans) that are run within a dynamic task are effectively independent of\nthe tasks that are registered in Admin's db. 
Confusingly, they are often identical, but sometimes they are not\neven registered at all. Similar to the target_entity field, at the time Admin receives this event, it has no idea\nif the relevant execution entity was registered, or dynamic. This field indicates that the target_entity ID,\nas well as task IDs in any corresponding Task Executions, should not be used to look up the task in Admin's db." + }, + "is_eager": { + "type": "boolean", + "description": "Whether this node launched an eager task." + } } }, diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/agent.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/agent.swagger.json index 070b6a8c60..4fbea61e15 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/agent.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/agent.swagger.json @@ -466,6 +466,16 @@ "description": "- JSON: JSON / YAML for the metadata (which contains inlined primitive values). The representation is inline with the standard json specification as specified - https://www.json.org/json-en.html\n - PROTO: Proto is a serialized binary of `core.LiteralMap` defined in flyteidl/core", "title": "LiteralMapFormat decides the encoding format in which the input metadata should be made available to the containers.\nIf the user has access to the protocol buffer definitions, it is recommended to use the PROTO format.\nJSON and YAML do not need any protobuf definitions to read it\nAll remote references in core.LiteralMap are replaced with local filesystem references (the data is downloaded to local filesystem)" }, + "ExecutionErrorErrorKind": { + "type": "string", + "enum": [ + "UNKNOWN", + "USER", + "SYSTEM" + ], + "default": "UNKNOWN", + "title": "Error type: System or User" + }, "IOStrategyDownloadMode": { "type": "string", "enum": [ @@ -620,6 +630,33 @@ }, "description": "A message containing the agent metadata." }, + "adminAgentError": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "A simplified code for errors, so that we can provide a glossary of all possible errors." + }, + "kind": { + "$ref": "#/definitions/adminAgentErrorKind", + "description": "An abstract error kind for this error. Defaults to Non_Recoverable if not specified." + }, + "origin": { + "$ref": "#/definitions/ExecutionErrorErrorKind", + "description": "Defines the origin of the error (system, user, unknown)." + } + }, + "description": "Error message to propagate detailed errors from agent executions to the execution\nengine." + }, + "adminAgentErrorKind": { + "type": "string", + "enum": [ + "NON_RECOVERABLE", + "RECOVERABLE" + ], + "default": "NON_RECOVERABLE", + "description": "Defines a generic error type that dictates the behavior of the retry strategy." + }, "adminCreateRequestHeader": { "type": "object", "properties": { @@ -817,6 +854,10 @@ "custom_info": { "type": "object", "description": "Custom data specific to the agent." + }, + "agent_error": { + "$ref": "#/definitions/adminAgentError", + "title": "The error raised during execution" + } } }, @@ -1814,6 +1855,10 @@ "type": "string" }, "description": "cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache." + }, + "is_eager": { + "type": "boolean", + "description": "is_eager indicates whether the task is eager or not.\nThis would be used by CreateTask endpoint."
} }, "title": "Task Metadata" diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/external_plugin_service.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/external_plugin_service.swagger.json index e690cc556c..1880700dc2 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/external_plugin_service.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/external_plugin_service.swagger.json @@ -1063,6 +1063,10 @@ "type": "string" }, "description": "cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache." + }, + "is_eager": { + "type": "boolean", + "description": "is_eager indicates whether the task is eager or not.\nThis would be used by CreateTask endpoint." } }, "title": "Task Metadata" diff --git a/flyteidl/gen/pb-js/flyteidl.d.ts b/flyteidl/gen/pb-js/flyteidl.d.ts index 73b5a73eaa..6b4c1bb3ab 100644 --- a/flyteidl/gen/pb-js/flyteidl.d.ts +++ b/flyteidl/gen/pb-js/flyteidl.d.ts @@ -4586,6 +4586,9 @@ export namespace flyteidl { /** ArrayNode executionMode */ executionMode?: (flyteidl.core.ArrayNode.ExecutionMode|null); + + /** ArrayNode isOriginalSubNodeInterface */ + isOriginalSubNodeInterface?: (google.protobuf.IBoolValue|null); } /** Represents an ArrayNode. */ @@ -4612,6 +4615,9 @@ export namespace flyteidl { /** ArrayNode executionMode. */ public executionMode: flyteidl.core.ArrayNode.ExecutionMode; + /** ArrayNode isOriginalSubNodeInterface. */ + public isOriginalSubNodeInterface?: (google.protobuf.IBoolValue|null); + /** ArrayNode parallelismOption. */ public parallelismOption?: "parallelism"; @@ -6401,6 +6407,9 @@ export namespace flyteidl { /** TaskMetadata cacheIgnoreInputVars */ cacheIgnoreInputVars?: (string[]|null); + + /** TaskMetadata isEager */ + isEager?: (boolean|null); } /** Represents a TaskMetadata. */ @@ -6448,6 +6457,9 @@ export namespace flyteidl { /** TaskMetadata cacheIgnoreInputVars. */ public cacheIgnoreInputVars: string[]; + /** TaskMetadata isEager. */ + public isEager: boolean; + /** TaskMetadata interruptibleValue. */ public interruptibleValue?: "interruptible"; @@ -8513,6 +8525,9 @@ export namespace flyteidl { /** NodeExecutionEvent isInDynamicChain */ isInDynamicChain?: (boolean|null); + + /** NodeExecutionEvent isEager */ + isEager?: (boolean|null); } /** Represents a NodeExecutionEvent. */ @@ -8596,6 +8611,9 @@ export namespace flyteidl { /** NodeExecutionEvent isInDynamicChain. */ public isInDynamicChain: boolean; + /** NodeExecutionEvent isEager. */ + public isEager: boolean; + /** NodeExecutionEvent inputValue. */ public inputValue?: ("inputUri"|"inputData"); @@ -10016,6 +10034,9 @@ export namespace flyteidl { /** Resource customInfo */ customInfo?: (google.protobuf.IStruct|null); + + /** Resource agentError */ + agentError?: (flyteidl.admin.IAgentError|null); } /** Represents a Resource. */ @@ -10045,6 +10066,9 @@ export namespace flyteidl { /** Resource customInfo. */ public customInfo?: (google.protobuf.IStruct|null); + /** Resource agentError. */ + public agentError?: (flyteidl.admin.IAgentError|null); + /** * Creates a new Resource instance using the specified properties. * @param [properties] Properties to set @@ -10899,6 +10923,79 @@ export namespace flyteidl { public static verify(message: { [k: string]: any }): (string|null); } + /** Properties of an AgentError. 
*/ + interface IAgentError { + + /** AgentError code */ + code?: (string|null); + + /** AgentError kind */ + kind?: (flyteidl.admin.AgentError.Kind|null); + + /** AgentError origin */ + origin?: (flyteidl.core.ExecutionError.ErrorKind|null); + } + + /** Represents an AgentError. */ + class AgentError implements IAgentError { + + /** + * Constructs a new AgentError. + * @param [properties] Properties to set + */ + constructor(properties?: flyteidl.admin.IAgentError); + + /** AgentError code. */ + public code: string; + + /** AgentError kind. */ + public kind: flyteidl.admin.AgentError.Kind; + + /** AgentError origin. */ + public origin: flyteidl.core.ExecutionError.ErrorKind; + + /** + * Creates a new AgentError instance using the specified properties. + * @param [properties] Properties to set + * @returns AgentError instance + */ + public static create(properties?: flyteidl.admin.IAgentError): flyteidl.admin.AgentError; + + /** + * Encodes the specified AgentError message. Does not implicitly {@link flyteidl.admin.AgentError.verify|verify} messages. + * @param message AgentError message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: flyteidl.admin.IAgentError, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an AgentError message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns AgentError + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): flyteidl.admin.AgentError; + + /** + * Verifies an AgentError message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + } + + namespace AgentError { + + /** Kind enum. */ + enum Kind { + NON_RECOVERABLE = 0, + RECOVERABLE = 1 + } + } + /** Properties of a ClusterAssignment. */ interface IClusterAssignment { @@ -16958,6 +17055,9 @@ export namespace flyteidl { /** NodeExecutionMetaData isArray */ isArray?: (boolean|null); + + /** NodeExecutionMetaData isEager */ + isEager?: (boolean|null); } /** Represents a NodeExecutionMetaData. */ @@ -16984,6 +17084,9 @@ export namespace flyteidl { /** NodeExecutionMetaData isArray. */ public isArray: boolean; + /** NodeExecutionMetaData isEager. */ + public isEager: boolean; + /** * Creates a new NodeExecutionMetaData instance using the specified properties. * @param [properties] Properties to set diff --git a/flyteidl/gen/pb-js/flyteidl.js b/flyteidl/gen/pb-js/flyteidl.js index 970a69229c..91c985b439 100644 --- a/flyteidl/gen/pb-js/flyteidl.js +++ b/flyteidl/gen/pb-js/flyteidl.js @@ -10982,6 +10982,7 @@ * @property {number|null} [minSuccesses] ArrayNode minSuccesses * @property {number|null} [minSuccessRatio] ArrayNode minSuccessRatio * @property {flyteidl.core.ArrayNode.ExecutionMode|null} [executionMode] ArrayNode executionMode + * @property {google.protobuf.IBoolValue|null} [isOriginalSubNodeInterface] ArrayNode isOriginalSubNodeInterface */ /** @@ -11039,6 +11040,14 @@ */ ArrayNode.prototype.executionMode = 0; + /** + * ArrayNode isOriginalSubNodeInterface. 
+ * @member {google.protobuf.IBoolValue|null|undefined} isOriginalSubNodeInterface + * @memberof flyteidl.core.ArrayNode + * @instance + */ + ArrayNode.prototype.isOriginalSubNodeInterface = null; + // OneOf field names bound to virtual getters and setters var $oneOfFields; @@ -11098,6 +11107,8 @@ writer.uint32(/* id 4, wireType 5 =*/37).float(message.minSuccessRatio); if (message.executionMode != null && message.hasOwnProperty("executionMode")) writer.uint32(/* id 5, wireType 0 =*/40).int32(message.executionMode); + if (message.isOriginalSubNodeInterface != null && message.hasOwnProperty("isOriginalSubNodeInterface")) + $root.google.protobuf.BoolValue.encode(message.isOriginalSubNodeInterface, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); return writer; }; @@ -11134,6 +11145,9 @@ case 5: message.executionMode = reader.int32(); break; + case 6: + message.isOriginalSubNodeInterface = $root.google.protobuf.BoolValue.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -11184,6 +11198,11 @@ case 1: break; } + if (message.isOriginalSubNodeInterface != null && message.hasOwnProperty("isOriginalSubNodeInterface")) { + var error = $root.google.protobuf.BoolValue.verify(message.isOriginalSubNodeInterface); + if (error) + return "isOriginalSubNodeInterface." + error; + } return null; }; @@ -15302,6 +15321,7 @@ * @property {Object.|null} [tags] TaskMetadata tags * @property {string|null} [podTemplateName] TaskMetadata podTemplateName * @property {Array.|null} [cacheIgnoreInputVars] TaskMetadata cacheIgnoreInputVars + * @property {boolean|null} [isEager] TaskMetadata isEager */ /** @@ -15417,6 +15437,14 @@ */ TaskMetadata.prototype.cacheIgnoreInputVars = $util.emptyArray; + /** + * TaskMetadata isEager. + * @member {boolean} isEager + * @memberof flyteidl.core.TaskMetadata + * @instance + */ + TaskMetadata.prototype.isEager = false; + // OneOf field names bound to virtual getters and setters var $oneOfFields; @@ -15481,6 +15509,8 @@ if (message.cacheIgnoreInputVars != null && message.cacheIgnoreInputVars.length) for (var i = 0; i < message.cacheIgnoreInputVars.length; ++i) writer.uint32(/* id 13, wireType 2 =*/106).string(message.cacheIgnoreInputVars[i]); + if (message.isEager != null && message.hasOwnProperty("isEager")) + writer.uint32(/* id 14, wireType 0 =*/112).bool(message.isEager); return writer; }; @@ -15545,6 +15575,9 @@ message.cacheIgnoreInputVars = []; message.cacheIgnoreInputVars.push(reader.string()); break; + case 14: + message.isEager = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -15618,6 +15651,9 @@ if (!$util.isString(message.cacheIgnoreInputVars[i])) return "cacheIgnoreInputVars: string[] expected"; } + if (message.isEager != null && message.hasOwnProperty("isEager")) + if (typeof message.isEager !== "boolean") + return "isEager: boolean expected"; return null; }; @@ -20578,6 +20614,7 @@ * @property {boolean|null} [isArray] NodeExecutionEvent isArray * @property {flyteidl.core.IIdentifier|null} [targetEntity] NodeExecutionEvent targetEntity * @property {boolean|null} [isInDynamicChain] NodeExecutionEvent isInDynamicChain + * @property {boolean|null} [isEager] NodeExecutionEvent isEager */ /** @@ -20787,6 +20824,14 @@ */ NodeExecutionEvent.prototype.isInDynamicChain = false; + /** + * NodeExecutionEvent isEager. 
+ * @member {boolean} isEager + * @memberof flyteidl.event.NodeExecutionEvent + * @instance + */ + NodeExecutionEvent.prototype.isEager = false; + // OneOf field names bound to virtual getters and setters var $oneOfFields; @@ -20895,6 +20940,8 @@ $root.flyteidl.core.Identifier.encode(message.targetEntity, writer.uint32(/* id 23, wireType 2 =*/186).fork()).ldelim(); if (message.isInDynamicChain != null && message.hasOwnProperty("isInDynamicChain")) writer.uint32(/* id 24, wireType 0 =*/192).bool(message.isInDynamicChain); + if (message.isEager != null && message.hasOwnProperty("isEager")) + writer.uint32(/* id 25, wireType 0 =*/200).bool(message.isEager); return writer; }; @@ -20988,6 +21035,9 @@ case 24: message.isInDynamicChain = reader.bool(); break; + case 25: + message.isEager = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -21143,6 +21193,9 @@ if (message.isInDynamicChain != null && message.hasOwnProperty("isInDynamicChain")) if (typeof message.isInDynamicChain !== "boolean") return "isInDynamicChain: boolean expected"; + if (message.isEager != null && message.hasOwnProperty("isEager")) + if (typeof message.isEager !== "boolean") + return "isEager: boolean expected"; return null; }; @@ -24545,6 +24598,7 @@ * @property {Array.|null} [logLinks] Resource logLinks * @property {flyteidl.core.TaskExecution.Phase|null} [phase] Resource phase * @property {google.protobuf.IStruct|null} [customInfo] Resource customInfo + * @property {flyteidl.admin.IAgentError|null} [agentError] Resource agentError */ /** @@ -24611,6 +24665,14 @@ */ Resource.prototype.customInfo = null; + /** + * Resource agentError. + * @member {flyteidl.admin.IAgentError|null|undefined} agentError + * @memberof flyteidl.admin.Resource + * @instance + */ + Resource.prototype.agentError = null; + /** * Creates a new Resource instance using the specified properties. * @function create @@ -24648,6 +24710,8 @@ writer.uint32(/* id 5, wireType 0 =*/40).int32(message.phase); if (message.customInfo != null && message.hasOwnProperty("customInfo")) $root.google.protobuf.Struct.encode(message.customInfo, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.agentError != null && message.hasOwnProperty("agentError")) + $root.flyteidl.admin.AgentError.encode(message.agentError, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); return writer; }; @@ -24689,6 +24753,9 @@ case 6: message.customInfo = $root.google.protobuf.Struct.decode(reader, reader.uint32()); break; + case 7: + message.agentError = $root.flyteidl.admin.AgentError.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -24755,6 +24822,11 @@ if (error) return "customInfo." + error; } + if (message.agentError != null && message.hasOwnProperty("agentError")) { + var error = $root.flyteidl.admin.AgentError.verify(message.agentError); + if (error) + return "agentError." + error; + } return null; }; @@ -26651,6 +26723,175 @@ return GetTaskLogsResponse; })(); + admin.AgentError = (function() { + + /** + * Properties of an AgentError. + * @memberof flyteidl.admin + * @interface IAgentError + * @property {string|null} [code] AgentError code + * @property {flyteidl.admin.AgentError.Kind|null} [kind] AgentError kind + * @property {flyteidl.core.ExecutionError.ErrorKind|null} [origin] AgentError origin + */ + + /** + * Constructs a new AgentError. + * @memberof flyteidl.admin + * @classdesc Represents an AgentError. 
+ * @implements IAgentError + * @constructor + * @param {flyteidl.admin.IAgentError=} [properties] Properties to set + */ + function AgentError(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * AgentError code. + * @member {string} code + * @memberof flyteidl.admin.AgentError + * @instance + */ + AgentError.prototype.code = ""; + + /** + * AgentError kind. + * @member {flyteidl.admin.AgentError.Kind} kind + * @memberof flyteidl.admin.AgentError + * @instance + */ + AgentError.prototype.kind = 0; + + /** + * AgentError origin. + * @member {flyteidl.core.ExecutionError.ErrorKind} origin + * @memberof flyteidl.admin.AgentError + * @instance + */ + AgentError.prototype.origin = 0; + + /** + * Creates a new AgentError instance using the specified properties. + * @function create + * @memberof flyteidl.admin.AgentError + * @static + * @param {flyteidl.admin.IAgentError=} [properties] Properties to set + * @returns {flyteidl.admin.AgentError} AgentError instance + */ + AgentError.create = function create(properties) { + return new AgentError(properties); + }; + + /** + * Encodes the specified AgentError message. Does not implicitly {@link flyteidl.admin.AgentError.verify|verify} messages. + * @function encode + * @memberof flyteidl.admin.AgentError + * @static + * @param {flyteidl.admin.IAgentError} message AgentError message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AgentError.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.code != null && message.hasOwnProperty("code")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.code); + if (message.kind != null && message.hasOwnProperty("kind")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.kind); + if (message.origin != null && message.hasOwnProperty("origin")) + writer.uint32(/* id 4, wireType 0 =*/32).int32(message.origin); + return writer; + }; + + /** + * Decodes an AgentError message from the specified reader or buffer. + * @function decode + * @memberof flyteidl.admin.AgentError + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {flyteidl.admin.AgentError} AgentError + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AgentError.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.flyteidl.admin.AgentError(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.code = reader.string(); + break; + case 3: + message.kind = reader.int32(); + break; + case 4: + message.origin = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Verifies an AgentError message. 
+ * @function verify + * @memberof flyteidl.admin.AgentError + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + AgentError.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.code != null && message.hasOwnProperty("code")) + if (!$util.isString(message.code)) + return "code: string expected"; + if (message.kind != null && message.hasOwnProperty("kind")) + switch (message.kind) { + default: + return "kind: enum value expected"; + case 0: + case 1: + break; + } + if (message.origin != null && message.hasOwnProperty("origin")) + switch (message.origin) { + default: + return "origin: enum value expected"; + case 0: + case 1: + case 2: + break; + } + return null; + }; + + /** + * Kind enum. + * @name flyteidl.admin.AgentError.Kind + * @enum {string} + * @property {number} NON_RECOVERABLE=0 NON_RECOVERABLE value + * @property {number} RECOVERABLE=1 RECOVERABLE value + */ + AgentError.Kind = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NON_RECOVERABLE"] = 0; + values[valuesById[1] = "RECOVERABLE"] = 1; + return values; + })(); + + return AgentError; + })(); + admin.ClusterAssignment = (function() { /** @@ -41057,6 +41298,7 @@ * @property {string|null} [specNodeId] NodeExecutionMetaData specNodeId * @property {boolean|null} [isDynamic] NodeExecutionMetaData isDynamic * @property {boolean|null} [isArray] NodeExecutionMetaData isArray + * @property {boolean|null} [isEager] NodeExecutionMetaData isEager */ /** @@ -41114,6 +41356,14 @@ */ NodeExecutionMetaData.prototype.isArray = false; + /** + * NodeExecutionMetaData isEager. + * @member {boolean} isEager + * @memberof flyteidl.admin.NodeExecutionMetaData + * @instance + */ + NodeExecutionMetaData.prototype.isEager = false; + /** * Creates a new NodeExecutionMetaData instance using the specified properties. 
* @function create @@ -41148,6 +41398,8 @@ writer.uint32(/* id 4, wireType 0 =*/32).bool(message.isDynamic); if (message.isArray != null && message.hasOwnProperty("isArray")) writer.uint32(/* id 5, wireType 0 =*/40).bool(message.isArray); + if (message.isEager != null && message.hasOwnProperty("isEager")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.isEager); return writer; }; @@ -41184,6 +41436,9 @@ case 5: message.isArray = reader.bool(); break; + case 6: + message.isEager = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -41218,6 +41473,9 @@ if (message.isArray != null && message.hasOwnProperty("isArray")) if (typeof message.isArray !== "boolean") return "isArray: boolean expected"; + if (message.isEager != null && message.hasOwnProperty("isEager")) + if (typeof message.isEager !== "boolean") + return "isEager: boolean expected"; return null; }; diff --git a/flyteidl/gen/pb_python/flyteidl/admin/agent_pb2.py b/flyteidl/gen/pb_python/flyteidl/admin/agent_pb2.py index 03e181a3e1..924a7e94e6 100644 --- a/flyteidl/gen/pb_python/flyteidl/admin/agent_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/admin/agent_pb2.py @@ -23,7 +23,7 @@ from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/admin/agent.proto\x12\x0e\x66lyteidl.admin\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x19\x66lyteidl/core/tasks.proto\x1a\x1c\x66lyteidl/core/workflow.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1b\x66lyteidl/core/metrics.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x9e\x07\n\x15TaskExecutionMetadata\x12R\n\x11task_execution_id\x18\x01 \x01(\x0b\x32&.flyteidl.core.TaskExecutionIdentifierR\x0ftaskExecutionId\x12\x1c\n\tnamespace\x18\x02 \x01(\tR\tnamespace\x12I\n\x06labels\x18\x03 \x03(\x0b\x32\x31.flyteidl.admin.TaskExecutionMetadata.LabelsEntryR\x06labels\x12X\n\x0b\x61nnotations\x18\x04 \x03(\x0b\x32\x36.flyteidl.admin.TaskExecutionMetadata.AnnotationsEntryR\x0b\x61nnotations\x12.\n\x13k8s_service_account\x18\x05 \x01(\tR\x11k8sServiceAccount\x12t\n\x15\x65nvironment_variables\x18\x06 \x03(\x0b\x32?.flyteidl.admin.TaskExecutionMetadata.EnvironmentVariablesEntryR\x14\x65nvironmentVariables\x12!\n\x0cmax_attempts\x18\x07 \x01(\x05R\x0bmaxAttempts\x12$\n\rinterruptible\x18\x08 \x01(\x08R\rinterruptible\x12\x46\n\x1finterruptible_failure_threshold\x18\t \x01(\x05R\x1dinterruptibleFailureThreshold\x12>\n\toverrides\x18\n \x01(\x0b\x32 .flyteidl.core.TaskNodeOverridesR\toverrides\x12\x33\n\x08identity\x18\x0b \x01(\x0b\x32\x17.flyteidl.core.IdentityR\x08identity\x1a\x39\n\x0bLabelsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x1aG\n\x19\x45nvironmentVariablesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\x83\x02\n\x11\x43reateTaskRequest\x12\x31\n\x06inputs\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x06inputs\x12\x37\n\x08template\x18\x02 \x01(\x0b\x32\x1b.flyteidl.core.TaskTemplateR\x08template\x12#\n\routput_prefix\x18\x03 \x01(\tR\x0coutputPrefix\x12]\n\x17task_execution_metadata\x18\x04 
\x01(\x0b\x32%.flyteidl.admin.TaskExecutionMetadataR\x15taskExecutionMetadata\"9\n\x12\x43reateTaskResponse\x12#\n\rresource_meta\x18\x01 \x01(\x0cR\x0cresourceMeta\"\x87\x02\n\x13\x43reateRequestHeader\x12\x37\n\x08template\x18\x01 \x01(\x0b\x32\x1b.flyteidl.core.TaskTemplateR\x08template\x12#\n\routput_prefix\x18\x02 \x01(\tR\x0coutputPrefix\x12]\n\x17task_execution_metadata\x18\x03 \x01(\x0b\x32%.flyteidl.admin.TaskExecutionMetadataR\x15taskExecutionMetadata\x12\x33\n\x16max_dataset_size_bytes\x18\x04 \x01(\x03R\x13maxDatasetSizeBytes\"\x94\x01\n\x16\x45xecuteTaskSyncRequest\x12=\n\x06header\x18\x01 \x01(\x0b\x32#.flyteidl.admin.CreateRequestHeaderH\x00R\x06header\x12\x33\n\x06inputs\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\x06inputsB\x06\n\x04part\"U\n\x1d\x45xecuteTaskSyncResponseHeader\x12\x34\n\x08resource\x18\x01 \x01(\x0b\x32\x18.flyteidl.admin.ResourceR\x08resource\"\xa0\x01\n\x17\x45xecuteTaskSyncResponse\x12G\n\x06header\x18\x01 \x01(\x0b\x32-.flyteidl.admin.ExecuteTaskSyncResponseHeaderH\x00R\x06header\x12\x35\n\x07outputs\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\x07outputsB\x05\n\x03res\"\x99\x01\n\x0eGetTaskRequest\x12\x1f\n\ttask_type\x18\x01 \x01(\tB\x02\x18\x01R\x08taskType\x12#\n\rresource_meta\x18\x02 \x01(\x0cR\x0cresourceMeta\x12\x41\n\rtask_category\x18\x03 \x01(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x0ctaskCategory\"G\n\x0fGetTaskResponse\x12\x34\n\x08resource\x18\x01 \x01(\x0b\x32\x18.flyteidl.admin.ResourceR\x08resource\"\xb3\x02\n\x08Resource\x12/\n\x05state\x18\x01 \x01(\x0e\x32\x15.flyteidl.admin.StateB\x02\x18\x01R\x05state\x12\x33\n\x07outputs\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x07outputs\x12\x18\n\x07message\x18\x03 \x01(\tR\x07message\x12\x33\n\tlog_links\x18\x04 \x03(\x0b\x32\x16.flyteidl.core.TaskLogR\x08logLinks\x12\x38\n\x05phase\x18\x05 \x01(\x0e\x32\".flyteidl.core.TaskExecution.PhaseR\x05phase\x12\x38\n\x0b\x63ustom_info\x18\x06 \x01(\x0b\x32\x17.google.protobuf.StructR\ncustomInfo\"\x9c\x01\n\x11\x44\x65leteTaskRequest\x12\x1f\n\ttask_type\x18\x01 \x01(\tB\x02\x18\x01R\x08taskType\x12#\n\rresource_meta\x18\x02 \x01(\x0cR\x0cresourceMeta\x12\x41\n\rtask_category\x18\x03 \x01(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x0ctaskCategory\"\x14\n\x12\x44\x65leteTaskResponse\"\xc4\x01\n\x05\x41gent\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x34\n\x14supported_task_types\x18\x02 \x03(\tB\x02\x18\x01R\x12supportedTaskTypes\x12\x17\n\x07is_sync\x18\x03 \x01(\x08R\x06isSync\x12X\n\x19supported_task_categories\x18\x04 \x03(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x17supportedTaskCategories\"<\n\x0cTaskCategory\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n\x07version\x18\x02 \x01(\x05R\x07version\"%\n\x0fGetAgentRequest\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\"?\n\x10GetAgentResponse\x12+\n\x05\x61gent\x18\x01 \x01(\x0b\x32\x15.flyteidl.admin.AgentR\x05\x61gent\"\x13\n\x11ListAgentsRequest\"C\n\x12ListAgentsResponse\x12-\n\x06\x61gents\x18\x01 \x03(\x0b\x32\x15.flyteidl.admin.AgentR\x06\x61gents\"\xdb\x02\n\x15GetTaskMetricsRequest\x12\x1f\n\ttask_type\x18\x01 \x01(\tB\x02\x18\x01R\x08taskType\x12#\n\rresource_meta\x18\x02 \x01(\x0cR\x0cresourceMeta\x12\x18\n\x07queries\x18\x03 \x03(\tR\x07queries\x12\x39\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartTime\x12\x35\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x07\x65ndTime\x12-\n\x04step\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationR\x04step\x12\x41\n\rtask_category\x18\x07 
\x01(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x0ctaskCategory\"X\n\x16GetTaskMetricsResponse\x12>\n\x07results\x18\x01 \x03(\x0b\x32$.flyteidl.core.ExecutionMetricResultR\x07results\"\xc9\x01\n\x12GetTaskLogsRequest\x12\x1f\n\ttask_type\x18\x01 \x01(\tB\x02\x18\x01R\x08taskType\x12#\n\rresource_meta\x18\x02 \x01(\x0cR\x0cresourceMeta\x12\x14\n\x05lines\x18\x03 \x01(\x04R\x05lines\x12\x14\n\x05token\x18\x04 \x01(\tR\x05token\x12\x41\n\rtask_category\x18\x05 \x01(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x0ctaskCategory\"1\n\x19GetTaskLogsResponseHeader\x12\x14\n\x05token\x18\x01 \x01(\tR\x05token\"3\n\x17GetTaskLogsResponseBody\x12\x18\n\x07results\x18\x01 \x03(\tR\x07results\"\xa1\x01\n\x13GetTaskLogsResponse\x12\x43\n\x06header\x18\x01 \x01(\x0b\x32).flyteidl.admin.GetTaskLogsResponseHeaderH\x00R\x06header\x12=\n\x04\x62ody\x18\x02 \x01(\x0b\x32\'.flyteidl.admin.GetTaskLogsResponseBodyH\x00R\x04\x62odyB\x06\n\x04part*b\n\x05State\x12\x15\n\x11RETRYABLE_FAILURE\x10\x00\x12\x15\n\x11PERMANENT_FAILURE\x10\x01\x12\x0b\n\x07PENDING\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x1a\x02\x18\x01\x42\xb6\x01\n\x12\x63om.flyteidl.adminB\nAgentProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/admin/agent.proto\x12\x0e\x66lyteidl.admin\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x19\x66lyteidl/core/tasks.proto\x1a\x1c\x66lyteidl/core/workflow.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1b\x66lyteidl/core/metrics.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x9e\x07\n\x15TaskExecutionMetadata\x12R\n\x11task_execution_id\x18\x01 \x01(\x0b\x32&.flyteidl.core.TaskExecutionIdentifierR\x0ftaskExecutionId\x12\x1c\n\tnamespace\x18\x02 \x01(\tR\tnamespace\x12I\n\x06labels\x18\x03 \x03(\x0b\x32\x31.flyteidl.admin.TaskExecutionMetadata.LabelsEntryR\x06labels\x12X\n\x0b\x61nnotations\x18\x04 \x03(\x0b\x32\x36.flyteidl.admin.TaskExecutionMetadata.AnnotationsEntryR\x0b\x61nnotations\x12.\n\x13k8s_service_account\x18\x05 \x01(\tR\x11k8sServiceAccount\x12t\n\x15\x65nvironment_variables\x18\x06 \x03(\x0b\x32?.flyteidl.admin.TaskExecutionMetadata.EnvironmentVariablesEntryR\x14\x65nvironmentVariables\x12!\n\x0cmax_attempts\x18\x07 \x01(\x05R\x0bmaxAttempts\x12$\n\rinterruptible\x18\x08 \x01(\x08R\rinterruptible\x12\x46\n\x1finterruptible_failure_threshold\x18\t \x01(\x05R\x1dinterruptibleFailureThreshold\x12>\n\toverrides\x18\n \x01(\x0b\x32 .flyteidl.core.TaskNodeOverridesR\toverrides\x12\x33\n\x08identity\x18\x0b \x01(\x0b\x32\x17.flyteidl.core.IdentityR\x08identity\x1a\x39\n\x0bLabelsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x1aG\n\x19\x45nvironmentVariablesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\x83\x02\n\x11\x43reateTaskRequest\x12\x31\n\x06inputs\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x06inputs\x12\x37\n\x08template\x18\x02 
\x01(\x0b\x32\x1b.flyteidl.core.TaskTemplateR\x08template\x12#\n\routput_prefix\x18\x03 \x01(\tR\x0coutputPrefix\x12]\n\x17task_execution_metadata\x18\x04 \x01(\x0b\x32%.flyteidl.admin.TaskExecutionMetadataR\x15taskExecutionMetadata\"9\n\x12\x43reateTaskResponse\x12#\n\rresource_meta\x18\x01 \x01(\x0cR\x0cresourceMeta\"\x87\x02\n\x13\x43reateRequestHeader\x12\x37\n\x08template\x18\x01 \x01(\x0b\x32\x1b.flyteidl.core.TaskTemplateR\x08template\x12#\n\routput_prefix\x18\x02 \x01(\tR\x0coutputPrefix\x12]\n\x17task_execution_metadata\x18\x03 \x01(\x0b\x32%.flyteidl.admin.TaskExecutionMetadataR\x15taskExecutionMetadata\x12\x33\n\x16max_dataset_size_bytes\x18\x04 \x01(\x03R\x13maxDatasetSizeBytes\"\x94\x01\n\x16\x45xecuteTaskSyncRequest\x12=\n\x06header\x18\x01 \x01(\x0b\x32#.flyteidl.admin.CreateRequestHeaderH\x00R\x06header\x12\x33\n\x06inputs\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\x06inputsB\x06\n\x04part\"U\n\x1d\x45xecuteTaskSyncResponseHeader\x12\x34\n\x08resource\x18\x01 \x01(\x0b\x32\x18.flyteidl.admin.ResourceR\x08resource\"\xa0\x01\n\x17\x45xecuteTaskSyncResponse\x12G\n\x06header\x18\x01 \x01(\x0b\x32-.flyteidl.admin.ExecuteTaskSyncResponseHeaderH\x00R\x06header\x12\x35\n\x07outputs\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\x07outputsB\x05\n\x03res\"\x99\x01\n\x0eGetTaskRequest\x12\x1f\n\ttask_type\x18\x01 \x01(\tB\x02\x18\x01R\x08taskType\x12#\n\rresource_meta\x18\x02 \x01(\x0cR\x0cresourceMeta\x12\x41\n\rtask_category\x18\x03 \x01(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x0ctaskCategory\"G\n\x0fGetTaskResponse\x12\x34\n\x08resource\x18\x01 \x01(\x0b\x32\x18.flyteidl.admin.ResourceR\x08resource\"\xf0\x02\n\x08Resource\x12/\n\x05state\x18\x01 \x01(\x0e\x32\x15.flyteidl.admin.StateB\x02\x18\x01R\x05state\x12\x33\n\x07outputs\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x07outputs\x12\x18\n\x07message\x18\x03 \x01(\tR\x07message\x12\x33\n\tlog_links\x18\x04 \x03(\x0b\x32\x16.flyteidl.core.TaskLogR\x08logLinks\x12\x38\n\x05phase\x18\x05 \x01(\x0e\x32\".flyteidl.core.TaskExecution.PhaseR\x05phase\x12\x38\n\x0b\x63ustom_info\x18\x06 \x01(\x0b\x32\x17.google.protobuf.StructR\ncustomInfo\x12;\n\x0b\x61gent_error\x18\x07 \x01(\x0b\x32\x1a.flyteidl.admin.AgentErrorR\nagentError\"\x9c\x01\n\x11\x44\x65leteTaskRequest\x12\x1f\n\ttask_type\x18\x01 \x01(\tB\x02\x18\x01R\x08taskType\x12#\n\rresource_meta\x18\x02 \x01(\x0cR\x0cresourceMeta\x12\x41\n\rtask_category\x18\x03 \x01(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x0ctaskCategory\"\x14\n\x12\x44\x65leteTaskResponse\"\xc4\x01\n\x05\x41gent\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x34\n\x14supported_task_types\x18\x02 \x03(\tB\x02\x18\x01R\x12supportedTaskTypes\x12\x17\n\x07is_sync\x18\x03 \x01(\x08R\x06isSync\x12X\n\x19supported_task_categories\x18\x04 \x03(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x17supportedTaskCategories\"<\n\x0cTaskCategory\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n\x07version\x18\x02 \x01(\x05R\x07version\"%\n\x0fGetAgentRequest\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\"?\n\x10GetAgentResponse\x12+\n\x05\x61gent\x18\x01 \x01(\x0b\x32\x15.flyteidl.admin.AgentR\x05\x61gent\"\x13\n\x11ListAgentsRequest\"C\n\x12ListAgentsResponse\x12-\n\x06\x61gents\x18\x01 \x03(\x0b\x32\x15.flyteidl.admin.AgentR\x06\x61gents\"\xdb\x02\n\x15GetTaskMetricsRequest\x12\x1f\n\ttask_type\x18\x01 \x01(\tB\x02\x18\x01R\x08taskType\x12#\n\rresource_meta\x18\x02 \x01(\x0cR\x0cresourceMeta\x12\x18\n\x07queries\x18\x03 \x03(\tR\x07queries\x12\x39\n\nstart_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartTime\x12\x35\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x07\x65ndTime\x12-\n\x04step\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationR\x04step\x12\x41\n\rtask_category\x18\x07 \x01(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x0ctaskCategory\"X\n\x16GetTaskMetricsResponse\x12>\n\x07results\x18\x01 \x03(\x0b\x32$.flyteidl.core.ExecutionMetricResultR\x07results\"\xc9\x01\n\x12GetTaskLogsRequest\x12\x1f\n\ttask_type\x18\x01 \x01(\tB\x02\x18\x01R\x08taskType\x12#\n\rresource_meta\x18\x02 \x01(\x0cR\x0cresourceMeta\x12\x14\n\x05lines\x18\x03 \x01(\x04R\x05lines\x12\x14\n\x05token\x18\x04 \x01(\tR\x05token\x12\x41\n\rtask_category\x18\x05 \x01(\x0b\x32\x1c.flyteidl.admin.TaskCategoryR\x0ctaskCategory\"1\n\x19GetTaskLogsResponseHeader\x12\x14\n\x05token\x18\x01 \x01(\tR\x05token\"3\n\x17GetTaskLogsResponseBody\x12\x18\n\x07results\x18\x01 \x03(\tR\x07results\"\xa1\x01\n\x13GetTaskLogsResponse\x12\x43\n\x06header\x18\x01 \x01(\x0b\x32).flyteidl.admin.GetTaskLogsResponseHeaderH\x00R\x06header\x12=\n\x04\x62ody\x18\x02 \x01(\x0b\x32\'.flyteidl.admin.GetTaskLogsResponseBodyH\x00R\x04\x62odyB\x06\n\x04part\"\xc4\x01\n\nAgentError\x12\x12\n\x04\x63ode\x18\x01 \x01(\tR\x04\x63ode\x12\x33\n\x04kind\x18\x03 \x01(\x0e\x32\x1f.flyteidl.admin.AgentError.KindR\x04kind\x12?\n\x06origin\x18\x04 \x01(\x0e\x32\'.flyteidl.core.ExecutionError.ErrorKindR\x06origin\",\n\x04Kind\x12\x13\n\x0fNON_RECOVERABLE\x10\x00\x12\x0f\n\x0bRECOVERABLE\x10\x01*b\n\x05State\x12\x15\n\x11RETRYABLE_FAILURE\x10\x00\x12\x15\n\x11PERMANENT_FAILURE\x10\x01\x12\x0b\n\x07PENDING\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x1a\x02\x18\x01\x42\xb6\x01\n\x12\x63om.flyteidl.adminB\nAgentProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -52,8 +52,8 @@ _GETTASKMETRICSREQUEST.fields_by_name['task_type']._serialized_options = b'\030\001' _GETTASKLOGSREQUEST.fields_by_name['task_type']._options = None _GETTASKLOGSREQUEST.fields_by_name['task_type']._serialized_options = b'\030\001' - _globals['_STATE']._serialized_start=4354 - _globals['_STATE']._serialized_end=4452 + _globals['_STATE']._serialized_start=4614 + _globals['_STATE']._serialized_end=4712 _globals['_TASKEXECUTIONMETADATA']._serialized_start=351 _globals['_TASKEXECUTIONMETADATA']._serialized_end=1277 _globals['_TASKEXECUTIONMETADATA_LABELSENTRY']._serialized_start=1083 @@ -79,33 +79,37 @@ _globals['_GETTASKRESPONSE']._serialized_start=2423 _globals['_GETTASKRESPONSE']._serialized_end=2494 _globals['_RESOURCE']._serialized_start=2497 - _globals['_RESOURCE']._serialized_end=2804 - _globals['_DELETETASKREQUEST']._serialized_start=2807 - _globals['_DELETETASKREQUEST']._serialized_end=2963 - _globals['_DELETETASKRESPONSE']._serialized_start=2965 - _globals['_DELETETASKRESPONSE']._serialized_end=2985 - _globals['_AGENT']._serialized_start=2988 - _globals['_AGENT']._serialized_end=3184 - _globals['_TASKCATEGORY']._serialized_start=3186 - _globals['_TASKCATEGORY']._serialized_end=3246 - _globals['_GETAGENTREQUEST']._serialized_start=3248 - _globals['_GETAGENTREQUEST']._serialized_end=3285 - _globals['_GETAGENTRESPONSE']._serialized_start=3287 - _globals['_GETAGENTRESPONSE']._serialized_end=3350 - 
_globals['_LISTAGENTSREQUEST']._serialized_start=3352 - _globals['_LISTAGENTSREQUEST']._serialized_end=3371 - _globals['_LISTAGENTSRESPONSE']._serialized_start=3373 - _globals['_LISTAGENTSRESPONSE']._serialized_end=3440 - _globals['_GETTASKMETRICSREQUEST']._serialized_start=3443 - _globals['_GETTASKMETRICSREQUEST']._serialized_end=3790 - _globals['_GETTASKMETRICSRESPONSE']._serialized_start=3792 - _globals['_GETTASKMETRICSRESPONSE']._serialized_end=3880 - _globals['_GETTASKLOGSREQUEST']._serialized_start=3883 - _globals['_GETTASKLOGSREQUEST']._serialized_end=4084 - _globals['_GETTASKLOGSRESPONSEHEADER']._serialized_start=4086 - _globals['_GETTASKLOGSRESPONSEHEADER']._serialized_end=4135 - _globals['_GETTASKLOGSRESPONSEBODY']._serialized_start=4137 - _globals['_GETTASKLOGSRESPONSEBODY']._serialized_end=4188 - _globals['_GETTASKLOGSRESPONSE']._serialized_start=4191 - _globals['_GETTASKLOGSRESPONSE']._serialized_end=4352 + _globals['_RESOURCE']._serialized_end=2865 + _globals['_DELETETASKREQUEST']._serialized_start=2868 + _globals['_DELETETASKREQUEST']._serialized_end=3024 + _globals['_DELETETASKRESPONSE']._serialized_start=3026 + _globals['_DELETETASKRESPONSE']._serialized_end=3046 + _globals['_AGENT']._serialized_start=3049 + _globals['_AGENT']._serialized_end=3245 + _globals['_TASKCATEGORY']._serialized_start=3247 + _globals['_TASKCATEGORY']._serialized_end=3307 + _globals['_GETAGENTREQUEST']._serialized_start=3309 + _globals['_GETAGENTREQUEST']._serialized_end=3346 + _globals['_GETAGENTRESPONSE']._serialized_start=3348 + _globals['_GETAGENTRESPONSE']._serialized_end=3411 + _globals['_LISTAGENTSREQUEST']._serialized_start=3413 + _globals['_LISTAGENTSREQUEST']._serialized_end=3432 + _globals['_LISTAGENTSRESPONSE']._serialized_start=3434 + _globals['_LISTAGENTSRESPONSE']._serialized_end=3501 + _globals['_GETTASKMETRICSREQUEST']._serialized_start=3504 + _globals['_GETTASKMETRICSREQUEST']._serialized_end=3851 + _globals['_GETTASKMETRICSRESPONSE']._serialized_start=3853 + _globals['_GETTASKMETRICSRESPONSE']._serialized_end=3941 + _globals['_GETTASKLOGSREQUEST']._serialized_start=3944 + _globals['_GETTASKLOGSREQUEST']._serialized_end=4145 + _globals['_GETTASKLOGSRESPONSEHEADER']._serialized_start=4147 + _globals['_GETTASKLOGSRESPONSEHEADER']._serialized_end=4196 + _globals['_GETTASKLOGSRESPONSEBODY']._serialized_start=4198 + _globals['_GETTASKLOGSRESPONSEBODY']._serialized_end=4249 + _globals['_GETTASKLOGSRESPONSE']._serialized_start=4252 + _globals['_GETTASKLOGSRESPONSE']._serialized_end=4413 + _globals['_AGENTERROR']._serialized_start=4416 + _globals['_AGENTERROR']._serialized_end=4612 + _globals['_AGENTERROR_KIND']._serialized_start=4568 + _globals['_AGENTERROR_KIND']._serialized_end=4612 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/admin/agent_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/admin/agent_pb2.pyi index 956b5d5a4d..d4243c2738 100644 --- a/flyteidl/gen/pb_python/flyteidl/admin/agent_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/admin/agent_pb2.pyi @@ -145,20 +145,22 @@ class GetTaskResponse(_message.Message): def __init__(self, resource: _Optional[_Union[Resource, _Mapping]] = ...) -> None: ... 
 class Resource(_message.Message):
-    __slots__ = ["state", "outputs", "message", "log_links", "phase", "custom_info"]
+    __slots__ = ["state", "outputs", "message", "log_links", "phase", "custom_info", "agent_error"]
     STATE_FIELD_NUMBER: _ClassVar[int]
     OUTPUTS_FIELD_NUMBER: _ClassVar[int]
     MESSAGE_FIELD_NUMBER: _ClassVar[int]
     LOG_LINKS_FIELD_NUMBER: _ClassVar[int]
     PHASE_FIELD_NUMBER: _ClassVar[int]
     CUSTOM_INFO_FIELD_NUMBER: _ClassVar[int]
+    AGENT_ERROR_FIELD_NUMBER: _ClassVar[int]
     state: State
     outputs: _literals_pb2.LiteralMap
     message: str
     log_links: _containers.RepeatedCompositeFieldContainer[_execution_pb2.TaskLog]
     phase: _execution_pb2.TaskExecution.Phase
     custom_info: _struct_pb2.Struct
-    def __init__(self, state: _Optional[_Union[State, str]] = ..., outputs: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., message: _Optional[str] = ..., log_links: _Optional[_Iterable[_Union[_execution_pb2.TaskLog, _Mapping]]] = ..., phase: _Optional[_Union[_execution_pb2.TaskExecution.Phase, str]] = ..., custom_info: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ...) -> None: ...
+    agent_error: AgentError
+    def __init__(self, state: _Optional[_Union[State, str]] = ..., outputs: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., message: _Optional[str] = ..., log_links: _Optional[_Iterable[_Union[_execution_pb2.TaskLog, _Mapping]]] = ..., phase: _Optional[_Union[_execution_pb2.TaskExecution.Phase, str]] = ..., custom_info: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ..., agent_error: _Optional[_Union[AgentError, _Mapping]] = ...) -> None: ...
 
 class DeleteTaskRequest(_message.Message):
     __slots__ = ["task_type", "resource_meta", "task_category"]
@@ -273,3 +275,19 @@ class GetTaskLogsResponse(_message.Message):
     header: GetTaskLogsResponseHeader
     body: GetTaskLogsResponseBody
     def __init__(self, header: _Optional[_Union[GetTaskLogsResponseHeader, _Mapping]] = ..., body: _Optional[_Union[GetTaskLogsResponseBody, _Mapping]] = ...) -> None: ...
+
+class AgentError(_message.Message):
+    __slots__ = ["code", "kind", "origin"]
+    class Kind(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
+        __slots__ = []
+        NON_RECOVERABLE: _ClassVar[AgentError.Kind]
+        RECOVERABLE: _ClassVar[AgentError.Kind]
+    NON_RECOVERABLE: AgentError.Kind
+    RECOVERABLE: AgentError.Kind
+    CODE_FIELD_NUMBER: _ClassVar[int]
+    KIND_FIELD_NUMBER: _ClassVar[int]
+    ORIGIN_FIELD_NUMBER: _ClassVar[int]
+    code: str
+    kind: AgentError.Kind
+    origin: _execution_pb2.ExecutionError.ErrorKind
+    def __init__(self, code: _Optional[str] = ..., kind: _Optional[_Union[AgentError.Kind, str]] = ..., origin: _Optional[_Union[_execution_pb2.ExecutionError.ErrorKind, str]] = ...) -> None: ...
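The hunks above add an AgentError message and surface it on Resource through the new agent_error field. As a quick illustration of what that looks like from the regenerated Python bindings, here is a minimal sketch; it assumes the flyteidl package built from this branch is importable, the error code string is made up for the example, and ExecutionError.ErrorKind (with its USER value) comes from flyteidl/core/execution.proto rather than from this diff.

# Sketch only, not part of the generated code in this diff.
from flyteidl.admin import agent_pb2
from flyteidl.core import execution_pb2

# The new AgentError carries a machine-readable code, a recoverability hint,
# and the origin of the failure (user vs. system).
err = agent_pb2.AgentError(
    code="ResourceExhausted",                  # hypothetical code, for illustration only
    kind=agent_pb2.AgentError.RECOVERABLE,
    origin=execution_pb2.ExecutionError.USER,  # assumed ErrorKind value from the core execution proto
)

# Resource now carries agent_error alongside the existing phase/message/outputs fields.
res = agent_pb2.Resource(
    phase=execution_pb2.TaskExecution.FAILED,
    message="task failed inside the agent",
    agent_error=err,
)

assert res.agent_error.kind == agent_pb2.AgentError.RECOVERABLE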
diff --git a/flyteidl/gen/pb_python/flyteidl/admin/common_pb2.py b/flyteidl/gen/pb_python/flyteidl/admin/common_pb2.py index c12fa17b0f..dc3a3ded92 100644 --- a/flyteidl/gen/pb_python/flyteidl/admin/common_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/admin/common_pb2.py @@ -17,7 +17,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x66lyteidl/admin/common.proto\x12\x0e\x66lyteidl.admin\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"o\n\x15NamedEntityIdentifier\x12\x18\n\x07project\x18\x01 \x01(\tR\x07project\x12\x16\n\x06\x64omain\x18\x02 \x01(\tR\x06\x64omain\x12\x12\n\x04name\x18\x03 \x01(\tR\x04name\x12\x10\n\x03org\x18\x04 \x01(\tR\x03org\"o\n\x13NamedEntityMetadata\x12 \n\x0b\x64\x65scription\x18\x01 \x01(\tR\x0b\x64\x65scription\x12\x36\n\x05state\x18\x02 \x01(\x0e\x32 .flyteidl.admin.NamedEntityStateR\x05state\"\xc7\x01\n\x0bNamedEntity\x12@\n\rresource_type\x18\x01 \x01(\x0e\x32\x1b.flyteidl.core.ResourceTypeR\x0cresourceType\x12\x35\n\x02id\x18\x02 \x01(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x02id\x12?\n\x08metadata\x18\x03 \x01(\x0b\x32#.flyteidl.admin.NamedEntityMetadataR\x08metadata\"\x82\x01\n\x04Sort\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12<\n\tdirection\x18\x02 \x01(\x0e\x32\x1e.flyteidl.admin.Sort.DirectionR\tdirection\"*\n\tDirection\x12\x0e\n\nDESCENDING\x10\x00\x12\r\n\tASCENDING\x10\x01\"\xdb\x01\n NamedEntityIdentifierListRequest\x12\x18\n\x07project\x18\x01 \x01(\tR\x07project\x12\x16\n\x06\x64omain\x18\x02 \x01(\tR\x06\x64omain\x12\x14\n\x05limit\x18\x03 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x04 \x01(\tR\x05token\x12-\n\x07sort_by\x18\x05 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\x12\x18\n\x07\x66ilters\x18\x06 \x01(\tR\x07\x66ilters\x12\x10\n\x03org\x18\x07 \x01(\tR\x03org\"\x93\x02\n\x16NamedEntityListRequest\x12@\n\rresource_type\x18\x01 \x01(\x0e\x32\x1b.flyteidl.core.ResourceTypeR\x0cresourceType\x12\x18\n\x07project\x18\x02 \x01(\tR\x07project\x12\x16\n\x06\x64omain\x18\x03 \x01(\tR\x06\x64omain\x12\x14\n\x05limit\x18\x04 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x05 \x01(\tR\x05token\x12-\n\x07sort_by\x18\x06 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\x12\x18\n\x07\x66ilters\x18\x07 \x01(\tR\x07\x66ilters\x12\x10\n\x03org\x18\x08 \x01(\tR\x03org\"t\n\x19NamedEntityIdentifierList\x12\x41\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x08\x65ntities\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"`\n\x0fNamedEntityList\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x1b.flyteidl.admin.NamedEntityR\x08\x65ntities\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"\x90\x01\n\x15NamedEntityGetRequest\x12@\n\rresource_type\x18\x01 \x01(\x0e\x32\x1b.flyteidl.core.ResourceTypeR\x0cresourceType\x12\x35\n\x02id\x18\x02 \x01(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x02id\"\xd4\x01\n\x18NamedEntityUpdateRequest\x12@\n\rresource_type\x18\x01 \x01(\x0e\x32\x1b.flyteidl.core.ResourceTypeR\x0cresourceType\x12\x35\n\x02id\x18\x02 \x01(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x02id\x12?\n\x08metadata\x18\x03 \x01(\x0b\x32#.flyteidl.admin.NamedEntityMetadataR\x08metadata\"\x1b\n\x19NamedEntityUpdateResponse\"=\n\x10ObjectGetRequest\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\"\xc1\x01\n\x13ResourceListRequest\x12\x35\n\x02id\x18\x01 
\x01(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x02id\x12\x14\n\x05limit\x18\x02 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x03 \x01(\tR\x05token\x12\x18\n\x07\x66ilters\x18\x04 \x01(\tR\x07\x66ilters\x12-\n\x07sort_by\x18\x05 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\">\n\x11\x45mailNotification\x12)\n\x10recipients_email\x18\x01 \x03(\tR\x0frecipientsEmail\"B\n\x15PagerDutyNotification\x12)\n\x10recipients_email\x18\x01 \x03(\tR\x0frecipientsEmail\">\n\x11SlackNotification\x12)\n\x10recipients_email\x18\x01 \x03(\tR\x0frecipientsEmail\"\x94\x02\n\x0cNotification\x12>\n\x06phases\x18\x01 \x03(\x0e\x32&.flyteidl.core.WorkflowExecution.PhaseR\x06phases\x12\x39\n\x05\x65mail\x18\x02 \x01(\x0b\x32!.flyteidl.admin.EmailNotificationH\x00R\x05\x65mail\x12\x46\n\npager_duty\x18\x03 \x01(\x0b\x32%.flyteidl.admin.PagerDutyNotificationH\x00R\tpagerDuty\x12\x39\n\x05slack\x18\x04 \x01(\x0b\x32!.flyteidl.admin.SlackNotificationH\x00R\x05slackB\x06\n\x04type\"5\n\x07UrlBlob\x12\x10\n\x03url\x18\x01 \x01(\tR\x03url\x12\x14\n\x05\x62ytes\x18\x02 \x01(\x03R\x05\x62ytes:\x02\x18\x01\"\x7f\n\x06Labels\x12:\n\x06values\x18\x01 \x03(\x0b\x32\".flyteidl.admin.Labels.ValuesEntryR\x06values\x1a\x39\n\x0bValuesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\x89\x01\n\x0b\x41nnotations\x12?\n\x06values\x18\x01 \x03(\x0b\x32\'.flyteidl.admin.Annotations.ValuesEntryR\x06values\x1a\x39\n\x0bValuesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\";\n\x04\x45nvs\x12\x33\n\x06values\x18\x01 \x03(\x0b\x32\x1b.flyteidl.core.KeyValuePairR\x06values\"z\n\x08\x41uthRole\x12,\n\x12\x61ssumable_iam_role\x18\x01 \x01(\tR\x10\x61ssumableIamRole\x12<\n\x1akubernetes_service_account\x18\x02 \x01(\tR\x18kubernetesServiceAccount:\x02\x18\x01\"K\n\x13RawOutputDataConfig\x12\x34\n\x16output_location_prefix\x18\x01 \x01(\tR\x14outputLocationPrefix\"Q\n\tFlyteURLs\x12\x16\n\x06inputs\x18\x01 \x01(\tR\x06inputs\x12\x18\n\x07outputs\x18\x02 \x01(\tR\x07outputs\x12\x12\n\x04\x64\x65\x63k\x18\x03 \x01(\tR\x04\x64\x65\x63k*\\\n\x10NamedEntityState\x12\x17\n\x13NAMED_ENTITY_ACTIVE\x10\x00\x12\x19\n\x15NAMED_ENTITY_ARCHIVED\x10\x01\x12\x14\n\x10SYSTEM_GENERATED\x10\x02\x42\xb7\x01\n\x12\x63om.flyteidl.adminB\x0b\x43ommonProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x66lyteidl/admin/common.proto\x12\x0e\x66lyteidl.admin\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"o\n\x15NamedEntityIdentifier\x12\x18\n\x07project\x18\x01 \x01(\tR\x07project\x12\x16\n\x06\x64omain\x18\x02 \x01(\tR\x06\x64omain\x12\x12\n\x04name\x18\x03 \x01(\tR\x04name\x12\x10\n\x03org\x18\x04 \x01(\tR\x03org\"o\n\x13NamedEntityMetadata\x12 \n\x0b\x64\x65scription\x18\x01 \x01(\tR\x0b\x64\x65scription\x12\x36\n\x05state\x18\x02 \x01(\x0e\x32 .flyteidl.admin.NamedEntityStateR\x05state\"\xc7\x01\n\x0bNamedEntity\x12@\n\rresource_type\x18\x01 \x01(\x0e\x32\x1b.flyteidl.core.ResourceTypeR\x0cresourceType\x12\x35\n\x02id\x18\x02 \x01(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x02id\x12?\n\x08metadata\x18\x03 
\x01(\x0b\x32#.flyteidl.admin.NamedEntityMetadataR\x08metadata\"\x82\x01\n\x04Sort\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12<\n\tdirection\x18\x02 \x01(\x0e\x32\x1e.flyteidl.admin.Sort.DirectionR\tdirection\"*\n\tDirection\x12\x0e\n\nDESCENDING\x10\x00\x12\r\n\tASCENDING\x10\x01\"\xdb\x01\n NamedEntityIdentifierListRequest\x12\x18\n\x07project\x18\x01 \x01(\tR\x07project\x12\x16\n\x06\x64omain\x18\x02 \x01(\tR\x06\x64omain\x12\x14\n\x05limit\x18\x03 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x04 \x01(\tR\x05token\x12-\n\x07sort_by\x18\x05 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\x12\x18\n\x07\x66ilters\x18\x06 \x01(\tR\x07\x66ilters\x12\x10\n\x03org\x18\x07 \x01(\tR\x03org\"\x93\x02\n\x16NamedEntityListRequest\x12@\n\rresource_type\x18\x01 \x01(\x0e\x32\x1b.flyteidl.core.ResourceTypeR\x0cresourceType\x12\x18\n\x07project\x18\x02 \x01(\tR\x07project\x12\x16\n\x06\x64omain\x18\x03 \x01(\tR\x06\x64omain\x12\x14\n\x05limit\x18\x04 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x05 \x01(\tR\x05token\x12-\n\x07sort_by\x18\x06 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\x12\x18\n\x07\x66ilters\x18\x07 \x01(\tR\x07\x66ilters\x12\x10\n\x03org\x18\x08 \x01(\tR\x03org\"t\n\x19NamedEntityIdentifierList\x12\x41\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x08\x65ntities\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"`\n\x0fNamedEntityList\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x1b.flyteidl.admin.NamedEntityR\x08\x65ntities\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"\x90\x01\n\x15NamedEntityGetRequest\x12@\n\rresource_type\x18\x01 \x01(\x0e\x32\x1b.flyteidl.core.ResourceTypeR\x0cresourceType\x12\x35\n\x02id\x18\x02 \x01(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x02id\"\xd4\x01\n\x18NamedEntityUpdateRequest\x12@\n\rresource_type\x18\x01 \x01(\x0e\x32\x1b.flyteidl.core.ResourceTypeR\x0cresourceType\x12\x35\n\x02id\x18\x02 \x01(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x02id\x12?\n\x08metadata\x18\x03 \x01(\x0b\x32#.flyteidl.admin.NamedEntityMetadataR\x08metadata\"\x1b\n\x19NamedEntityUpdateResponse\"=\n\x10ObjectGetRequest\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\"\xc1\x01\n\x13ResourceListRequest\x12\x35\n\x02id\x18\x01 \x01(\x0b\x32%.flyteidl.admin.NamedEntityIdentifierR\x02id\x12\x14\n\x05limit\x18\x02 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x03 \x01(\tR\x05token\x12\x18\n\x07\x66ilters\x18\x04 \x01(\tR\x07\x66ilters\x12-\n\x07sort_by\x18\x05 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\">\n\x11\x45mailNotification\x12)\n\x10recipients_email\x18\x01 \x03(\tR\x0frecipientsEmail\"B\n\x15PagerDutyNotification\x12)\n\x10recipients_email\x18\x01 \x03(\tR\x0frecipientsEmail\">\n\x11SlackNotification\x12)\n\x10recipients_email\x18\x01 \x03(\tR\x0frecipientsEmail\"\x94\x02\n\x0cNotification\x12>\n\x06phases\x18\x01 \x03(\x0e\x32&.flyteidl.core.WorkflowExecution.PhaseR\x06phases\x12\x39\n\x05\x65mail\x18\x02 \x01(\x0b\x32!.flyteidl.admin.EmailNotificationH\x00R\x05\x65mail\x12\x46\n\npager_duty\x18\x03 \x01(\x0b\x32%.flyteidl.admin.PagerDutyNotificationH\x00R\tpagerDuty\x12\x39\n\x05slack\x18\x04 \x01(\x0b\x32!.flyteidl.admin.SlackNotificationH\x00R\x05slackB\x06\n\x04type\"5\n\x07UrlBlob\x12\x10\n\x03url\x18\x01 \x01(\tR\x03url\x12\x14\n\x05\x62ytes\x18\x02 \x01(\x03R\x05\x62ytes:\x02\x18\x01\"\x7f\n\x06Labels\x12:\n\x06values\x18\x01 \x03(\x0b\x32\".flyteidl.admin.Labels.ValuesEntryR\x06values\x1a\x39\n\x0bValuesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 
\x01(\tR\x05value:\x02\x38\x01\"\x89\x01\n\x0b\x41nnotations\x12?\n\x06values\x18\x01 \x03(\x0b\x32\'.flyteidl.admin.Annotations.ValuesEntryR\x06values\x1a\x39\n\x0bValuesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\";\n\x04\x45nvs\x12\x33\n\x06values\x18\x01 \x03(\x0b\x32\x1b.flyteidl.core.KeyValuePairR\x06values\"z\n\x08\x41uthRole\x12,\n\x12\x61ssumable_iam_role\x18\x01 \x01(\tR\x10\x61ssumableIamRole\x12<\n\x1akubernetes_service_account\x18\x02 \x01(\tR\x18kubernetesServiceAccount:\x02\x18\x01\"K\n\x13RawOutputDataConfig\x12\x34\n\x16output_location_prefix\x18\x01 \x01(\tR\x14outputLocationPrefix\"Q\n\tFlyteURLs\x12\x16\n\x06inputs\x18\x01 \x01(\tR\x06inputs\x12\x18\n\x07outputs\x18\x02 \x01(\tR\x07outputs\x12\x12\n\x04\x64\x65\x63k\x18\x03 \x01(\tR\x04\x64\x65\x63k*h\n\x10NamedEntityState\x12\x17\n\x13NAMED_ENTITY_ACTIVE\x10\x00\x12\x19\n\x15NAMED_ENTITY_ARCHIVED\x10\x01\x12\x14\n\x10SYSTEM_GENERATED\x10\x02\"\x04\x08\x03\x10\x03\"\x04\x08\x04\x10\x04\x42\xb7\x01\n\x12\x63om.flyteidl.adminB\x0b\x43ommonProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -35,7 +35,7 @@ _AUTHROLE._options = None _AUTHROLE._serialized_options = b'\030\001' _globals['_NAMEDENTITYSTATE']._serialized_start=3244 - _globals['_NAMEDENTITYSTATE']._serialized_end=3336 + _globals['_NAMEDENTITYSTATE']._serialized_end=3348 _globals['_NAMEDENTITYIDENTIFIER']._serialized_start=173 _globals['_NAMEDENTITYIDENTIFIER']._serialized_end=284 _globals['_NAMEDENTITYMETADATA']._serialized_start=286 diff --git a/flyteidl/gen/pb_python/flyteidl/admin/execution_pb2.py b/flyteidl/gen/pb_python/flyteidl/admin/execution_pb2.py index ff650d4c55..ddc0b22799 100644 --- a/flyteidl/gen/pb_python/flyteidl/admin/execution_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/admin/execution_pb2.py @@ -26,7 +26,7 @@ from flyteidl.admin import matchable_resource_pb2 as flyteidl_dot_admin_dot_matchable__resource__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x66lyteidl/admin/execution.proto\x12\x0e\x66lyteidl.admin\x1a\'flyteidl/admin/cluster_assignment.proto\x1a\x1b\x66lyteidl/admin/common.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\"flyteidl/core/execution_envs.proto\x1a\x1f\x66lyteidl/core/artifact_id.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1b\x66lyteidl/core/metrics.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\'flyteidl/admin/matchable_resource.proto\"\xd6\x01\n\x16\x45xecutionCreateRequest\x12\x18\n\x07project\x18\x01 \x01(\tR\x07project\x12\x16\n\x06\x64omain\x18\x02 \x01(\tR\x06\x64omain\x12\x12\n\x04name\x18\x03 \x01(\tR\x04name\x12\x31\n\x04spec\x18\x04 \x01(\x0b\x32\x1d.flyteidl.admin.ExecutionSpecR\x04spec\x12\x31\n\x06inputs\x18\x05 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x06inputs\x12\x10\n\x03org\x18\x06 \x01(\tR\x03org\"\x99\x01\n\x18\x45xecutionRelaunchRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x12\n\x04name\x18\x03 \x01(\tR\x04name\x12\'\n\x0foverwrite_cache\x18\x04 
\x01(\x08R\x0eoverwriteCacheJ\x04\x08\x02\x10\x03\"\xa8\x01\n\x17\x45xecutionRecoverRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32!.flyteidl.admin.ExecutionMetadataR\x08metadata\"U\n\x17\x45xecutionCreateResponse\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\"Y\n\x1bWorkflowExecutionGetRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\"\xb6\x01\n\tExecution\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x31\n\x04spec\x18\x02 \x01(\x0b\x32\x1d.flyteidl.admin.ExecutionSpecR\x04spec\x12:\n\x07\x63losure\x18\x03 \x01(\x0b\x32 .flyteidl.admin.ExecutionClosureR\x07\x63losure\"`\n\rExecutionList\x12\x39\n\nexecutions\x18\x01 \x03(\x0b\x32\x19.flyteidl.admin.ExecutionR\nexecutions\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"e\n\x0eLiteralMapBlob\x12\x37\n\x06values\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01H\x00R\x06values\x12\x12\n\x03uri\x18\x02 \x01(\tH\x00R\x03uriB\x06\n\x04\x64\x61ta\"C\n\rAbortMetadata\x12\x14\n\x05\x63\x61use\x18\x01 \x01(\tR\x05\x63\x61use\x12\x1c\n\tprincipal\x18\x02 \x01(\tR\tprincipal\"\x98\x07\n\x10\x45xecutionClosure\x12>\n\x07outputs\x18\x01 \x01(\x0b\x32\x1e.flyteidl.admin.LiteralMapBlobB\x02\x18\x01H\x00R\x07outputs\x12\x35\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x00R\x05\x65rror\x12%\n\x0b\x61\x62ort_cause\x18\n \x01(\tB\x02\x18\x01H\x00R\nabortCause\x12\x46\n\x0e\x61\x62ort_metadata\x18\x0c \x01(\x0b\x32\x1d.flyteidl.admin.AbortMetadataH\x00R\rabortMetadata\x12@\n\x0boutput_data\x18\r \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01H\x00R\noutputData\x12\x46\n\x0f\x63omputed_inputs\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01R\x0e\x63omputedInputs\x12<\n\x05phase\x18\x04 \x01(\x0e\x32&.flyteidl.core.WorkflowExecution.PhaseR\x05phase\x12\x39\n\nstarted_at\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartedAt\x12\x35\n\x08\x64uration\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationR\x08\x64uration\x12\x39\n\ncreated_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x39\n\nupdated_at\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tupdatedAt\x12\x42\n\rnotifications\x18\t \x03(\x0b\x32\x1c.flyteidl.admin.NotificationR\rnotifications\x12:\n\x0bworkflow_id\x18\x0b \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\nworkflowId\x12]\n\x14state_change_details\x18\x0e \x01(\x0b\x32+.flyteidl.admin.ExecutionStateChangeDetailsR\x12stateChangeDetailsB\x0f\n\routput_result\"[\n\x0eSystemMetadata\x12+\n\x11\x65xecution_cluster\x18\x01 \x01(\tR\x10\x65xecutionCluster\x12\x1c\n\tnamespace\x18\x02 \x01(\tR\tnamespace\"\x85\x05\n\x11\x45xecutionMetadata\x12\x43\n\x04mode\x18\x01 \x01(\x0e\x32/.flyteidl.admin.ExecutionMetadata.ExecutionModeR\x04mode\x12\x1c\n\tprincipal\x18\x02 \x01(\tR\tprincipal\x12\x18\n\x07nesting\x18\x03 \x01(\rR\x07nesting\x12=\n\x0cscheduled_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x0bscheduledAt\x12Z\n\x15parent_node_execution\x18\x05 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x13parentNodeExecution\x12[\n\x13reference_execution\x18\x10 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x12referenceExecution\x12G\n\x0fsystem_metadata\x18\x11 \x01(\x0b\x32\x1e.flyteidl.admin.SystemMetadataR\x0esystemMetadata\x12<\n\x0c\x61rtifact_ids\x18\x12 
\x03(\x0b\x32\x19.flyteidl.core.ArtifactIDR\x0b\x61rtifactIds\"t\n\rExecutionMode\x12\n\n\x06MANUAL\x10\x00\x12\r\n\tSCHEDULED\x10\x01\x12\n\n\x06SYSTEM\x10\x02\x12\x0c\n\x08RELAUNCH\x10\x03\x12\x12\n\x0e\x43HILD_WORKFLOW\x10\x04\x12\r\n\tRECOVERED\x10\x05\x12\x0b\n\x07TRIGGER\x10\x06\"V\n\x10NotificationList\x12\x42\n\rnotifications\x18\x01 \x03(\x0b\x32\x1c.flyteidl.admin.NotificationR\rnotifications\"\xd6\t\n\rExecutionSpec\x12:\n\x0blaunch_plan\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\nlaunchPlan\x12\x35\n\x06inputs\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01R\x06inputs\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32!.flyteidl.admin.ExecutionMetadataR\x08metadata\x12H\n\rnotifications\x18\x05 \x01(\x0b\x32 .flyteidl.admin.NotificationListH\x00R\rnotifications\x12!\n\x0b\x64isable_all\x18\x06 \x01(\x08H\x00R\ndisableAll\x12.\n\x06labels\x18\x07 \x01(\x0b\x32\x16.flyteidl.admin.LabelsR\x06labels\x12=\n\x0b\x61nnotations\x18\x08 \x01(\x0b\x32\x1b.flyteidl.admin.AnnotationsR\x0b\x61nnotations\x12I\n\x10security_context\x18\n \x01(\x0b\x32\x1e.flyteidl.core.SecurityContextR\x0fsecurityContext\x12\x39\n\tauth_role\x18\x10 \x01(\x0b\x32\x18.flyteidl.admin.AuthRoleB\x02\x18\x01R\x08\x61uthRole\x12M\n\x12quality_of_service\x18\x11 \x01(\x0b\x32\x1f.flyteidl.core.QualityOfServiceR\x10qualityOfService\x12\'\n\x0fmax_parallelism\x18\x12 \x01(\x05R\x0emaxParallelism\x12X\n\x16raw_output_data_config\x18\x13 \x01(\x0b\x32#.flyteidl.admin.RawOutputDataConfigR\x13rawOutputDataConfig\x12P\n\x12\x63luster_assignment\x18\x14 \x01(\x0b\x32!.flyteidl.admin.ClusterAssignmentR\x11\x63lusterAssignment\x12@\n\rinterruptible\x18\x15 \x01(\x0b\x32\x1a.google.protobuf.BoolValueR\rinterruptible\x12\'\n\x0foverwrite_cache\x18\x16 \x01(\x08R\x0eoverwriteCache\x12(\n\x04\x65nvs\x18\x17 \x01(\x0b\x32\x14.flyteidl.admin.EnvsR\x04\x65nvs\x12\x16\n\x04tags\x18\x18 \x03(\tB\x02\x18\x01R\x04tags\x12]\n\x17\x65xecution_cluster_label\x18\x19 \x01(\x0b\x32%.flyteidl.admin.ExecutionClusterLabelR\x15\x65xecutionClusterLabel\x12\x61\n\x19\x65xecution_env_assignments\x18\x1a \x03(\x0b\x32%.flyteidl.core.ExecutionEnvAssignmentR\x17\x65xecutionEnvAssignmentsB\x18\n\x16notification_overridesJ\x04\x08\x04\x10\x05\"m\n\x19\x45xecutionTerminateRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x14\n\x05\x63\x61use\x18\x02 \x01(\tR\x05\x63\x61use\"\x1c\n\x1a\x45xecutionTerminateResponse\"]\n\x1fWorkflowExecutionGetDataRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\"\x88\x02\n WorkflowExecutionGetDataResponse\x12\x35\n\x07outputs\x18\x01 \x01(\x0b\x32\x17.flyteidl.admin.UrlBlobB\x02\x18\x01R\x07outputs\x12\x33\n\x06inputs\x18\x02 \x01(\x0b\x32\x17.flyteidl.admin.UrlBlobB\x02\x18\x01R\x06inputs\x12:\n\x0b\x66ull_inputs\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\nfullInputs\x12<\n\x0c\x66ull_outputs\x18\x04 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x0b\x66ullOutputs\"\x8a\x01\n\x16\x45xecutionUpdateRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x34\n\x05state\x18\x02 \x01(\x0e\x32\x1e.flyteidl.admin.ExecutionStateR\x05state\"\xae\x01\n\x1b\x45xecutionStateChangeDetails\x12\x34\n\x05state\x18\x01 \x01(\x0e\x32\x1e.flyteidl.admin.ExecutionStateR\x05state\x12;\n\x0boccurred_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1c\n\tprincipal\x18\x03 
\x01(\tR\tprincipal\"\x19\n\x17\x45xecutionUpdateResponse\"v\n\"WorkflowExecutionGetMetricsRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x14\n\x05\x64\x65pth\x18\x02 \x01(\x05R\x05\x64\x65pth\"N\n#WorkflowExecutionGetMetricsResponse\x12\'\n\x04span\x18\x01 \x01(\x0b\x32\x13.flyteidl.core.SpanR\x04span*>\n\x0e\x45xecutionState\x12\x14\n\x10\x45XECUTION_ACTIVE\x10\x00\x12\x16\n\x12\x45XECUTION_ARCHIVED\x10\x01\x42\xba\x01\n\x12\x63om.flyteidl.adminB\x0e\x45xecutionProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x66lyteidl/admin/execution.proto\x12\x0e\x66lyteidl.admin\x1a\'flyteidl/admin/cluster_assignment.proto\x1a\x1b\x66lyteidl/admin/common.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\"flyteidl/core/execution_envs.proto\x1a\x1f\x66lyteidl/core/artifact_id.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1b\x66lyteidl/core/metrics.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\'flyteidl/admin/matchable_resource.proto\"\xd6\x01\n\x16\x45xecutionCreateRequest\x12\x18\n\x07project\x18\x01 \x01(\tR\x07project\x12\x16\n\x06\x64omain\x18\x02 \x01(\tR\x06\x64omain\x12\x12\n\x04name\x18\x03 \x01(\tR\x04name\x12\x31\n\x04spec\x18\x04 \x01(\x0b\x32\x1d.flyteidl.admin.ExecutionSpecR\x04spec\x12\x31\n\x06inputs\x18\x05 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x06inputs\x12\x10\n\x03org\x18\x06 \x01(\tR\x03org\"\x99\x01\n\x18\x45xecutionRelaunchRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x12\n\x04name\x18\x03 \x01(\tR\x04name\x12\'\n\x0foverwrite_cache\x18\x04 \x01(\x08R\x0eoverwriteCacheJ\x04\x08\x02\x10\x03\"\xa8\x01\n\x17\x45xecutionRecoverRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32!.flyteidl.admin.ExecutionMetadataR\x08metadata\"U\n\x17\x45xecutionCreateResponse\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\"Y\n\x1bWorkflowExecutionGetRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\"\xb6\x01\n\tExecution\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x31\n\x04spec\x18\x02 \x01(\x0b\x32\x1d.flyteidl.admin.ExecutionSpecR\x04spec\x12:\n\x07\x63losure\x18\x03 \x01(\x0b\x32 .flyteidl.admin.ExecutionClosureR\x07\x63losure\"`\n\rExecutionList\x12\x39\n\nexecutions\x18\x01 \x03(\x0b\x32\x19.flyteidl.admin.ExecutionR\nexecutions\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"e\n\x0eLiteralMapBlob\x12\x37\n\x06values\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01H\x00R\x06values\x12\x12\n\x03uri\x18\x02 \x01(\tH\x00R\x03uriB\x06\n\x04\x64\x61ta\"C\n\rAbortMetadata\x12\x14\n\x05\x63\x61use\x18\x01 \x01(\tR\x05\x63\x61use\x12\x1c\n\tprincipal\x18\x02 \x01(\tR\tprincipal\"\x98\x07\n\x10\x45xecutionClosure\x12>\n\x07outputs\x18\x01 \x01(\x0b\x32\x1e.flyteidl.admin.LiteralMapBlobB\x02\x18\x01H\x00R\x07outputs\x12\x35\n\x05\x65rror\x18\x02 
\x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x00R\x05\x65rror\x12%\n\x0b\x61\x62ort_cause\x18\n \x01(\tB\x02\x18\x01H\x00R\nabortCause\x12\x46\n\x0e\x61\x62ort_metadata\x18\x0c \x01(\x0b\x32\x1d.flyteidl.admin.AbortMetadataH\x00R\rabortMetadata\x12@\n\x0boutput_data\x18\r \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01H\x00R\noutputData\x12\x46\n\x0f\x63omputed_inputs\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01R\x0e\x63omputedInputs\x12<\n\x05phase\x18\x04 \x01(\x0e\x32&.flyteidl.core.WorkflowExecution.PhaseR\x05phase\x12\x39\n\nstarted_at\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartedAt\x12\x35\n\x08\x64uration\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationR\x08\x64uration\x12\x39\n\ncreated_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x39\n\nupdated_at\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tupdatedAt\x12\x42\n\rnotifications\x18\t \x03(\x0b\x32\x1c.flyteidl.admin.NotificationR\rnotifications\x12:\n\x0bworkflow_id\x18\x0b \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\nworkflowId\x12]\n\x14state_change_details\x18\x0e \x01(\x0b\x32+.flyteidl.admin.ExecutionStateChangeDetailsR\x12stateChangeDetailsB\x0f\n\routput_result\"[\n\x0eSystemMetadata\x12+\n\x11\x65xecution_cluster\x18\x01 \x01(\tR\x10\x65xecutionCluster\x12\x1c\n\tnamespace\x18\x02 \x01(\tR\tnamespace\"\x8b\x05\n\x11\x45xecutionMetadata\x12\x43\n\x04mode\x18\x01 \x01(\x0e\x32/.flyteidl.admin.ExecutionMetadata.ExecutionModeR\x04mode\x12\x1c\n\tprincipal\x18\x02 \x01(\tR\tprincipal\x12\x18\n\x07nesting\x18\x03 \x01(\rR\x07nesting\x12=\n\x0cscheduled_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x0bscheduledAt\x12Z\n\x15parent_node_execution\x18\x05 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x13parentNodeExecution\x12[\n\x13reference_execution\x18\x10 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x12referenceExecution\x12G\n\x0fsystem_metadata\x18\x11 \x01(\x0b\x32\x1e.flyteidl.admin.SystemMetadataR\x0esystemMetadata\x12<\n\x0c\x61rtifact_ids\x18\x12 \x03(\x0b\x32\x19.flyteidl.core.ArtifactIDR\x0b\x61rtifactIds\"z\n\rExecutionMode\x12\n\n\x06MANUAL\x10\x00\x12\r\n\tSCHEDULED\x10\x01\x12\n\n\x06SYSTEM\x10\x02\x12\x0c\n\x08RELAUNCH\x10\x03\x12\x12\n\x0e\x43HILD_WORKFLOW\x10\x04\x12\r\n\tRECOVERED\x10\x05\x12\x0b\n\x07TRIGGER\x10\x06\"\x04\x08\x07\x10\x07\"V\n\x10NotificationList\x12\x42\n\rnotifications\x18\x01 \x03(\x0b\x32\x1c.flyteidl.admin.NotificationR\rnotifications\"\xd6\t\n\rExecutionSpec\x12:\n\x0blaunch_plan\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\nlaunchPlan\x12\x35\n\x06inputs\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01R\x06inputs\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32!.flyteidl.admin.ExecutionMetadataR\x08metadata\x12H\n\rnotifications\x18\x05 \x01(\x0b\x32 .flyteidl.admin.NotificationListH\x00R\rnotifications\x12!\n\x0b\x64isable_all\x18\x06 \x01(\x08H\x00R\ndisableAll\x12.\n\x06labels\x18\x07 \x01(\x0b\x32\x16.flyteidl.admin.LabelsR\x06labels\x12=\n\x0b\x61nnotations\x18\x08 \x01(\x0b\x32\x1b.flyteidl.admin.AnnotationsR\x0b\x61nnotations\x12I\n\x10security_context\x18\n \x01(\x0b\x32\x1e.flyteidl.core.SecurityContextR\x0fsecurityContext\x12\x39\n\tauth_role\x18\x10 \x01(\x0b\x32\x18.flyteidl.admin.AuthRoleB\x02\x18\x01R\x08\x61uthRole\x12M\n\x12quality_of_service\x18\x11 \x01(\x0b\x32\x1f.flyteidl.core.QualityOfServiceR\x10qualityOfService\x12\'\n\x0fmax_parallelism\x18\x12 \x01(\x05R\x0emaxParallelism\x12X\n\x16raw_output_data_config\x18\x13 
\x01(\x0b\x32#.flyteidl.admin.RawOutputDataConfigR\x13rawOutputDataConfig\x12P\n\x12\x63luster_assignment\x18\x14 \x01(\x0b\x32!.flyteidl.admin.ClusterAssignmentR\x11\x63lusterAssignment\x12@\n\rinterruptible\x18\x15 \x01(\x0b\x32\x1a.google.protobuf.BoolValueR\rinterruptible\x12\'\n\x0foverwrite_cache\x18\x16 \x01(\x08R\x0eoverwriteCache\x12(\n\x04\x65nvs\x18\x17 \x01(\x0b\x32\x14.flyteidl.admin.EnvsR\x04\x65nvs\x12\x16\n\x04tags\x18\x18 \x03(\tB\x02\x18\x01R\x04tags\x12]\n\x17\x65xecution_cluster_label\x18\x19 \x01(\x0b\x32%.flyteidl.admin.ExecutionClusterLabelR\x15\x65xecutionClusterLabel\x12\x61\n\x19\x65xecution_env_assignments\x18\x1a \x03(\x0b\x32%.flyteidl.core.ExecutionEnvAssignmentR\x17\x65xecutionEnvAssignmentsB\x18\n\x16notification_overridesJ\x04\x08\x04\x10\x05\"m\n\x19\x45xecutionTerminateRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x14\n\x05\x63\x61use\x18\x02 \x01(\tR\x05\x63\x61use\"\x1c\n\x1a\x45xecutionTerminateResponse\"]\n\x1fWorkflowExecutionGetDataRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\"\x88\x02\n WorkflowExecutionGetDataResponse\x12\x35\n\x07outputs\x18\x01 \x01(\x0b\x32\x17.flyteidl.admin.UrlBlobB\x02\x18\x01R\x07outputs\x12\x33\n\x06inputs\x18\x02 \x01(\x0b\x32\x17.flyteidl.admin.UrlBlobB\x02\x18\x01R\x06inputs\x12:\n\x0b\x66ull_inputs\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\nfullInputs\x12<\n\x0c\x66ull_outputs\x18\x04 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x0b\x66ullOutputs\"\x8a\x01\n\x16\x45xecutionUpdateRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x34\n\x05state\x18\x02 \x01(\x0e\x32\x1e.flyteidl.admin.ExecutionStateR\x05state\"\xae\x01\n\x1b\x45xecutionStateChangeDetails\x12\x34\n\x05state\x18\x01 \x01(\x0e\x32\x1e.flyteidl.admin.ExecutionStateR\x05state\x12;\n\x0boccurred_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1c\n\tprincipal\x18\x03 \x01(\tR\tprincipal\"\x19\n\x17\x45xecutionUpdateResponse\"v\n\"WorkflowExecutionGetMetricsRequest\x12:\n\x02id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x02id\x12\x14\n\x05\x64\x65pth\x18\x02 \x01(\x05R\x05\x64\x65pth\"N\n#WorkflowExecutionGetMetricsResponse\x12\'\n\x04span\x18\x01 \x01(\x0b\x32\x13.flyteidl.core.SpanR\x04span*>\n\x0e\x45xecutionState\x12\x14\n\x10\x45XECUTION_ACTIVE\x10\x00\x12\x16\n\x12\x45XECUTION_ARCHIVED\x10\x01\x42\xba\x01\n\x12\x63om.flyteidl.adminB\x0e\x45xecutionProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -55,8 +55,8 @@ _WORKFLOWEXECUTIONGETDATARESPONSE.fields_by_name['outputs']._serialized_options = b'\030\001' _WORKFLOWEXECUTIONGETDATARESPONSE.fields_by_name['inputs']._options = None _WORKFLOWEXECUTIONGETDATARESPONSE.fields_by_name['inputs']._serialized_options = b'\030\001' - _globals['_EXECUTIONSTATE']._serialized_start=5697 - _globals['_EXECUTIONSTATE']._serialized_end=5759 + _globals['_EXECUTIONSTATE']._serialized_start=5703 + _globals['_EXECUTIONSTATE']._serialized_end=5765 _globals['_EXECUTIONCREATEREQUEST']._serialized_start=480 _globals['_EXECUTIONCREATEREQUEST']._serialized_end=694 _globals['_EXECUTIONRELAUNCHREQUEST']._serialized_start=697 @@ -80,29 +80,29 @@ 
_globals['_SYSTEMMETADATA']._serialized_start=2579 _globals['_SYSTEMMETADATA']._serialized_end=2670 _globals['_EXECUTIONMETADATA']._serialized_start=2673 - _globals['_EXECUTIONMETADATA']._serialized_end=3318 + _globals['_EXECUTIONMETADATA']._serialized_end=3324 _globals['_EXECUTIONMETADATA_EXECUTIONMODE']._serialized_start=3202 - _globals['_EXECUTIONMETADATA_EXECUTIONMODE']._serialized_end=3318 - _globals['_NOTIFICATIONLIST']._serialized_start=3320 - _globals['_NOTIFICATIONLIST']._serialized_end=3406 - _globals['_EXECUTIONSPEC']._serialized_start=3409 - _globals['_EXECUTIONSPEC']._serialized_end=4647 - _globals['_EXECUTIONTERMINATEREQUEST']._serialized_start=4649 - _globals['_EXECUTIONTERMINATEREQUEST']._serialized_end=4758 - _globals['_EXECUTIONTERMINATERESPONSE']._serialized_start=4760 - _globals['_EXECUTIONTERMINATERESPONSE']._serialized_end=4788 - _globals['_WORKFLOWEXECUTIONGETDATAREQUEST']._serialized_start=4790 - _globals['_WORKFLOWEXECUTIONGETDATAREQUEST']._serialized_end=4883 - _globals['_WORKFLOWEXECUTIONGETDATARESPONSE']._serialized_start=4886 - _globals['_WORKFLOWEXECUTIONGETDATARESPONSE']._serialized_end=5150 - _globals['_EXECUTIONUPDATEREQUEST']._serialized_start=5153 - _globals['_EXECUTIONUPDATEREQUEST']._serialized_end=5291 - _globals['_EXECUTIONSTATECHANGEDETAILS']._serialized_start=5294 - _globals['_EXECUTIONSTATECHANGEDETAILS']._serialized_end=5468 - _globals['_EXECUTIONUPDATERESPONSE']._serialized_start=5470 - _globals['_EXECUTIONUPDATERESPONSE']._serialized_end=5495 - _globals['_WORKFLOWEXECUTIONGETMETRICSREQUEST']._serialized_start=5497 - _globals['_WORKFLOWEXECUTIONGETMETRICSREQUEST']._serialized_end=5615 - _globals['_WORKFLOWEXECUTIONGETMETRICSRESPONSE']._serialized_start=5617 - _globals['_WORKFLOWEXECUTIONGETMETRICSRESPONSE']._serialized_end=5695 + _globals['_EXECUTIONMETADATA_EXECUTIONMODE']._serialized_end=3324 + _globals['_NOTIFICATIONLIST']._serialized_start=3326 + _globals['_NOTIFICATIONLIST']._serialized_end=3412 + _globals['_EXECUTIONSPEC']._serialized_start=3415 + _globals['_EXECUTIONSPEC']._serialized_end=4653 + _globals['_EXECUTIONTERMINATEREQUEST']._serialized_start=4655 + _globals['_EXECUTIONTERMINATEREQUEST']._serialized_end=4764 + _globals['_EXECUTIONTERMINATERESPONSE']._serialized_start=4766 + _globals['_EXECUTIONTERMINATERESPONSE']._serialized_end=4794 + _globals['_WORKFLOWEXECUTIONGETDATAREQUEST']._serialized_start=4796 + _globals['_WORKFLOWEXECUTIONGETDATAREQUEST']._serialized_end=4889 + _globals['_WORKFLOWEXECUTIONGETDATARESPONSE']._serialized_start=4892 + _globals['_WORKFLOWEXECUTIONGETDATARESPONSE']._serialized_end=5156 + _globals['_EXECUTIONUPDATEREQUEST']._serialized_start=5159 + _globals['_EXECUTIONUPDATEREQUEST']._serialized_end=5297 + _globals['_EXECUTIONSTATECHANGEDETAILS']._serialized_start=5300 + _globals['_EXECUTIONSTATECHANGEDETAILS']._serialized_end=5474 + _globals['_EXECUTIONUPDATERESPONSE']._serialized_start=5476 + _globals['_EXECUTIONUPDATERESPONSE']._serialized_end=5501 + _globals['_WORKFLOWEXECUTIONGETMETRICSREQUEST']._serialized_start=5503 + _globals['_WORKFLOWEXECUTIONGETMETRICSREQUEST']._serialized_end=5621 + _globals['_WORKFLOWEXECUTIONGETMETRICSRESPONSE']._serialized_start=5623 + _globals['_WORKFLOWEXECUTIONGETMETRICSRESPONSE']._serialized_end=5701 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/admin/node_execution_pb2.py b/flyteidl/gen/pb_python/flyteidl/admin/node_execution_pb2.py index 93a29df4d6..b3035a8318 100644 --- a/flyteidl/gen/pb_python/flyteidl/admin/node_execution_pb2.py 
+++ b/flyteidl/gen/pb_python/flyteidl/admin/node_execution_pb2.py @@ -21,7 +21,7 @@ from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#flyteidl/admin/node_execution.proto\x12\x0e\x66lyteidl.admin\x1a\x1b\x66lyteidl/admin/common.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1b\x66lyteidl/core/catalog.proto\x1a\x1c\x66lyteidl/core/compiler.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\"Q\n\x17NodeExecutionGetRequest\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\"\x99\x02\n\x18NodeExecutionListRequest\x12^\n\x15workflow_execution_id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x13workflowExecutionId\x12\x14\n\x05limit\x18\x02 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x03 \x01(\tR\x05token\x12\x18\n\x07\x66ilters\x18\x04 \x01(\tR\x07\x66ilters\x12-\n\x07sort_by\x18\x05 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\x12(\n\x10unique_parent_id\x18\x06 \x01(\tR\x0euniqueParentId\"\xea\x01\n\x1fNodeExecutionForTaskListRequest\x12R\n\x11task_execution_id\x18\x01 \x01(\x0b\x32&.flyteidl.core.TaskExecutionIdentifierR\x0ftaskExecutionId\x12\x14\n\x05limit\x18\x02 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x03 \x01(\tR\x05token\x12\x18\n\x07\x66ilters\x18\x04 \x01(\tR\x07\x66ilters\x12-\n\x07sort_by\x18\x05 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\"\xe7\x01\n\rNodeExecution\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\x12\x1b\n\tinput_uri\x18\x02 \x01(\tR\x08inputUri\x12>\n\x07\x63losure\x18\x03 \x01(\x0b\x32$.flyteidl.admin.NodeExecutionClosureR\x07\x63losure\x12\x41\n\x08metadata\x18\x04 \x01(\x0b\x32%.flyteidl.admin.NodeExecutionMetaDataR\x08metadata\"\xba\x01\n\x15NodeExecutionMetaData\x12\x1f\n\x0bretry_group\x18\x01 \x01(\tR\nretryGroup\x12$\n\x0eis_parent_node\x18\x02 \x01(\x08R\x0cisParentNode\x12 \n\x0cspec_node_id\x18\x03 \x01(\tR\nspecNodeId\x12\x1d\n\nis_dynamic\x18\x04 \x01(\x08R\tisDynamic\x12\x19\n\x08is_array\x18\x05 \x01(\x08R\x07isArray\"q\n\x11NodeExecutionList\x12\x46\n\x0fnode_executions\x18\x01 \x03(\x0b\x32\x1d.flyteidl.admin.NodeExecutionR\x0enodeExecutions\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"\xf6\x05\n\x14NodeExecutionClosure\x12#\n\noutput_uri\x18\x01 \x01(\tB\x02\x18\x01H\x00R\toutputUri\x12\x35\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x00R\x05\x65rror\x12@\n\x0boutput_data\x18\n \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01H\x00R\noutputData\x12\x38\n\x05phase\x18\x03 \x01(\x0e\x32\".flyteidl.core.NodeExecution.PhaseR\x05phase\x12\x39\n\nstarted_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartedAt\x12\x35\n\x08\x64uration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationR\x08\x64uration\x12\x39\n\ncreated_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x39\n\nupdated_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tupdatedAt\x12\\\n\x16workflow_node_metadata\x18\x08 \x01(\x0b\x32$.flyteidl.admin.WorkflowNodeMetadataH\x01R\x14workflowNodeMetadata\x12P\n\x12task_node_metadata\x18\t \x01(\x0b\x32 .flyteidl.admin.TaskNodeMetadataH\x01R\x10taskNodeMetadata\x12\x19\n\x08\x64\x65\x63k_uri\x18\x0b \x01(\tR\x07\x64\x65\x63kUri\x12/\n\x14\x64ynamic_job_spec_uri\x18\x0c 
\x01(\tR\x11\x64ynamicJobSpecUriB\x0f\n\routput_resultB\x11\n\x0ftarget_metadata\"d\n\x14WorkflowNodeMetadata\x12L\n\x0b\x65xecutionId\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\"\xc0\x01\n\x10TaskNodeMetadata\x12\x44\n\x0c\x63\x61\x63he_status\x18\x01 \x01(\x0e\x32!.flyteidl.core.CatalogCacheStatusR\x0b\x63\x61\x63heStatus\x12?\n\x0b\x63\x61talog_key\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.CatalogMetadataR\ncatalogKey\x12%\n\x0e\x63heckpoint_uri\x18\x04 \x01(\tR\rcheckpointUri\"\xce\x01\n\x1b\x44ynamicWorkflowNodeMetadata\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12S\n\x11\x63ompiled_workflow\x18\x02 \x01(\x0b\x32&.flyteidl.core.CompiledWorkflowClosureR\x10\x63ompiledWorkflow\x12/\n\x14\x64ynamic_job_spec_uri\x18\x03 \x01(\tR\x11\x64ynamicJobSpecUri\"U\n\x1bNodeExecutionGetDataRequest\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\"\x96\x03\n\x1cNodeExecutionGetDataResponse\x12\x33\n\x06inputs\x18\x01 \x01(\x0b\x32\x17.flyteidl.admin.UrlBlobB\x02\x18\x01R\x06inputs\x12\x35\n\x07outputs\x18\x02 \x01(\x0b\x32\x17.flyteidl.admin.UrlBlobB\x02\x18\x01R\x07outputs\x12:\n\x0b\x66ull_inputs\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\nfullInputs\x12<\n\x0c\x66ull_outputs\x18\x04 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x0b\x66ullOutputs\x12V\n\x10\x64ynamic_workflow\x18\x10 \x01(\x0b\x32+.flyteidl.admin.DynamicWorkflowNodeMetadataR\x0f\x64ynamicWorkflow\x12\x38\n\nflyte_urls\x18\x11 \x01(\x0b\x32\x19.flyteidl.admin.FlyteURLsR\tflyteUrls\"W\n\x1dGetDynamicNodeWorkflowRequest\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\"r\n\x1b\x44ynamicNodeWorkflowResponse\x12S\n\x11\x63ompiled_workflow\x18\x01 \x01(\x0b\x32&.flyteidl.core.CompiledWorkflowClosureR\x10\x63ompiledWorkflowB\xbe\x01\n\x12\x63om.flyteidl.adminB\x12NodeExecutionProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#flyteidl/admin/node_execution.proto\x12\x0e\x66lyteidl.admin\x1a\x1b\x66lyteidl/admin/common.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1b\x66lyteidl/core/catalog.proto\x1a\x1c\x66lyteidl/core/compiler.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\"Q\n\x17NodeExecutionGetRequest\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\"\x99\x02\n\x18NodeExecutionListRequest\x12^\n\x15workflow_execution_id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x13workflowExecutionId\x12\x14\n\x05limit\x18\x02 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x03 \x01(\tR\x05token\x12\x18\n\x07\x66ilters\x18\x04 \x01(\tR\x07\x66ilters\x12-\n\x07sort_by\x18\x05 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\x12(\n\x10unique_parent_id\x18\x06 \x01(\tR\x0euniqueParentId\"\xea\x01\n\x1fNodeExecutionForTaskListRequest\x12R\n\x11task_execution_id\x18\x01 \x01(\x0b\x32&.flyteidl.core.TaskExecutionIdentifierR\x0ftaskExecutionId\x12\x14\n\x05limit\x18\x02 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x03 \x01(\tR\x05token\x12\x18\n\x07\x66ilters\x18\x04 \x01(\tR\x07\x66ilters\x12-\n\x07sort_by\x18\x05 
\x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\"\xe7\x01\n\rNodeExecution\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\x12\x1b\n\tinput_uri\x18\x02 \x01(\tR\x08inputUri\x12>\n\x07\x63losure\x18\x03 \x01(\x0b\x32$.flyteidl.admin.NodeExecutionClosureR\x07\x63losure\x12\x41\n\x08metadata\x18\x04 \x01(\x0b\x32%.flyteidl.admin.NodeExecutionMetaDataR\x08metadata\"\xd5\x01\n\x15NodeExecutionMetaData\x12\x1f\n\x0bretry_group\x18\x01 \x01(\tR\nretryGroup\x12$\n\x0eis_parent_node\x18\x02 \x01(\x08R\x0cisParentNode\x12 \n\x0cspec_node_id\x18\x03 \x01(\tR\nspecNodeId\x12\x1d\n\nis_dynamic\x18\x04 \x01(\x08R\tisDynamic\x12\x19\n\x08is_array\x18\x05 \x01(\x08R\x07isArray\x12\x19\n\x08is_eager\x18\x06 \x01(\x08R\x07isEager\"q\n\x11NodeExecutionList\x12\x46\n\x0fnode_executions\x18\x01 \x03(\x0b\x32\x1d.flyteidl.admin.NodeExecutionR\x0enodeExecutions\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"\xf6\x05\n\x14NodeExecutionClosure\x12#\n\noutput_uri\x18\x01 \x01(\tB\x02\x18\x01H\x00R\toutputUri\x12\x35\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x00R\x05\x65rror\x12@\n\x0boutput_data\x18\n \x01(\x0b\x32\x19.flyteidl.core.LiteralMapB\x02\x18\x01H\x00R\noutputData\x12\x38\n\x05phase\x18\x03 \x01(\x0e\x32\".flyteidl.core.NodeExecution.PhaseR\x05phase\x12\x39\n\nstarted_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartedAt\x12\x35\n\x08\x64uration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationR\x08\x64uration\x12\x39\n\ncreated_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x39\n\nupdated_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tupdatedAt\x12\\\n\x16workflow_node_metadata\x18\x08 \x01(\x0b\x32$.flyteidl.admin.WorkflowNodeMetadataH\x01R\x14workflowNodeMetadata\x12P\n\x12task_node_metadata\x18\t \x01(\x0b\x32 .flyteidl.admin.TaskNodeMetadataH\x01R\x10taskNodeMetadata\x12\x19\n\x08\x64\x65\x63k_uri\x18\x0b \x01(\tR\x07\x64\x65\x63kUri\x12/\n\x14\x64ynamic_job_spec_uri\x18\x0c \x01(\tR\x11\x64ynamicJobSpecUriB\x0f\n\routput_resultB\x11\n\x0ftarget_metadata\"d\n\x14WorkflowNodeMetadata\x12L\n\x0b\x65xecutionId\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\"\xc0\x01\n\x10TaskNodeMetadata\x12\x44\n\x0c\x63\x61\x63he_status\x18\x01 \x01(\x0e\x32!.flyteidl.core.CatalogCacheStatusR\x0b\x63\x61\x63heStatus\x12?\n\x0b\x63\x61talog_key\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.CatalogMetadataR\ncatalogKey\x12%\n\x0e\x63heckpoint_uri\x18\x04 \x01(\tR\rcheckpointUri\"\xce\x01\n\x1b\x44ynamicWorkflowNodeMetadata\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12S\n\x11\x63ompiled_workflow\x18\x02 \x01(\x0b\x32&.flyteidl.core.CompiledWorkflowClosureR\x10\x63ompiledWorkflow\x12/\n\x14\x64ynamic_job_spec_uri\x18\x03 \x01(\tR\x11\x64ynamicJobSpecUri\"U\n\x1bNodeExecutionGetDataRequest\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\"\x96\x03\n\x1cNodeExecutionGetDataResponse\x12\x33\n\x06inputs\x18\x01 \x01(\x0b\x32\x17.flyteidl.admin.UrlBlobB\x02\x18\x01R\x06inputs\x12\x35\n\x07outputs\x18\x02 \x01(\x0b\x32\x17.flyteidl.admin.UrlBlobB\x02\x18\x01R\x07outputs\x12:\n\x0b\x66ull_inputs\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\nfullInputs\x12<\n\x0c\x66ull_outputs\x18\x04 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x0b\x66ullOutputs\x12V\n\x10\x64ynamic_workflow\x18\x10 \x01(\x0b\x32+.flyteidl.admin.DynamicWorkflowNodeMetadataR\x0f\x64ynamicWorkflow\x12\x38\n\nflyte_urls\x18\x11 
\x01(\x0b\x32\x19.flyteidl.admin.FlyteURLsR\tflyteUrls\"W\n\x1dGetDynamicNodeWorkflowRequest\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\"r\n\x1b\x44ynamicNodeWorkflowResponse\x12S\n\x11\x63ompiled_workflow\x18\x01 \x01(\x0b\x32&.flyteidl.core.CompiledWorkflowClosureR\x10\x63ompiledWorkflowB\xbe\x01\n\x12\x63om.flyteidl.adminB\x12NodeExecutionProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -47,23 +47,23 @@ _globals['_NODEEXECUTION']._serialized_start=906 _globals['_NODEEXECUTION']._serialized_end=1137 _globals['_NODEEXECUTIONMETADATA']._serialized_start=1140 - _globals['_NODEEXECUTIONMETADATA']._serialized_end=1326 - _globals['_NODEEXECUTIONLIST']._serialized_start=1328 - _globals['_NODEEXECUTIONLIST']._serialized_end=1441 - _globals['_NODEEXECUTIONCLOSURE']._serialized_start=1444 - _globals['_NODEEXECUTIONCLOSURE']._serialized_end=2202 - _globals['_WORKFLOWNODEMETADATA']._serialized_start=2204 - _globals['_WORKFLOWNODEMETADATA']._serialized_end=2304 - _globals['_TASKNODEMETADATA']._serialized_start=2307 - _globals['_TASKNODEMETADATA']._serialized_end=2499 - _globals['_DYNAMICWORKFLOWNODEMETADATA']._serialized_start=2502 - _globals['_DYNAMICWORKFLOWNODEMETADATA']._serialized_end=2708 - _globals['_NODEEXECUTIONGETDATAREQUEST']._serialized_start=2710 - _globals['_NODEEXECUTIONGETDATAREQUEST']._serialized_end=2795 - _globals['_NODEEXECUTIONGETDATARESPONSE']._serialized_start=2798 - _globals['_NODEEXECUTIONGETDATARESPONSE']._serialized_end=3204 - _globals['_GETDYNAMICNODEWORKFLOWREQUEST']._serialized_start=3206 - _globals['_GETDYNAMICNODEWORKFLOWREQUEST']._serialized_end=3293 - _globals['_DYNAMICNODEWORKFLOWRESPONSE']._serialized_start=3295 - _globals['_DYNAMICNODEWORKFLOWRESPONSE']._serialized_end=3409 + _globals['_NODEEXECUTIONMETADATA']._serialized_end=1353 + _globals['_NODEEXECUTIONLIST']._serialized_start=1355 + _globals['_NODEEXECUTIONLIST']._serialized_end=1468 + _globals['_NODEEXECUTIONCLOSURE']._serialized_start=1471 + _globals['_NODEEXECUTIONCLOSURE']._serialized_end=2229 + _globals['_WORKFLOWNODEMETADATA']._serialized_start=2231 + _globals['_WORKFLOWNODEMETADATA']._serialized_end=2331 + _globals['_TASKNODEMETADATA']._serialized_start=2334 + _globals['_TASKNODEMETADATA']._serialized_end=2526 + _globals['_DYNAMICWORKFLOWNODEMETADATA']._serialized_start=2529 + _globals['_DYNAMICWORKFLOWNODEMETADATA']._serialized_end=2735 + _globals['_NODEEXECUTIONGETDATAREQUEST']._serialized_start=2737 + _globals['_NODEEXECUTIONGETDATAREQUEST']._serialized_end=2822 + _globals['_NODEEXECUTIONGETDATARESPONSE']._serialized_start=2825 + _globals['_NODEEXECUTIONGETDATARESPONSE']._serialized_end=3231 + _globals['_GETDYNAMICNODEWORKFLOWREQUEST']._serialized_start=3233 + _globals['_GETDYNAMICNODEWORKFLOWREQUEST']._serialized_end=3320 + _globals['_DYNAMICNODEWORKFLOWRESPONSE']._serialized_start=3322 + _globals['_DYNAMICNODEWORKFLOWRESPONSE']._serialized_end=3436 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/admin/node_execution_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/admin/node_execution_pb2.pyi index 9bf601847d..091e61fb4e 100644 --- a/flyteidl/gen/pb_python/flyteidl/admin/node_execution_pb2.pyi +++ 
b/flyteidl/gen/pb_python/flyteidl/admin/node_execution_pb2.pyi @@ -62,18 +62,20 @@ class NodeExecution(_message.Message): def __init__(self, id: _Optional[_Union[_identifier_pb2.NodeExecutionIdentifier, _Mapping]] = ..., input_uri: _Optional[str] = ..., closure: _Optional[_Union[NodeExecutionClosure, _Mapping]] = ..., metadata: _Optional[_Union[NodeExecutionMetaData, _Mapping]] = ...) -> None: ... class NodeExecutionMetaData(_message.Message): - __slots__ = ["retry_group", "is_parent_node", "spec_node_id", "is_dynamic", "is_array"] + __slots__ = ["retry_group", "is_parent_node", "spec_node_id", "is_dynamic", "is_array", "is_eager"] RETRY_GROUP_FIELD_NUMBER: _ClassVar[int] IS_PARENT_NODE_FIELD_NUMBER: _ClassVar[int] SPEC_NODE_ID_FIELD_NUMBER: _ClassVar[int] IS_DYNAMIC_FIELD_NUMBER: _ClassVar[int] IS_ARRAY_FIELD_NUMBER: _ClassVar[int] + IS_EAGER_FIELD_NUMBER: _ClassVar[int] retry_group: str is_parent_node: bool spec_node_id: str is_dynamic: bool is_array: bool - def __init__(self, retry_group: _Optional[str] = ..., is_parent_node: bool = ..., spec_node_id: _Optional[str] = ..., is_dynamic: bool = ..., is_array: bool = ...) -> None: ... + is_eager: bool + def __init__(self, retry_group: _Optional[str] = ..., is_parent_node: bool = ..., spec_node_id: _Optional[str] = ..., is_dynamic: bool = ..., is_array: bool = ..., is_eager: bool = ...) -> None: ... class NodeExecutionList(_message.Message): __slots__ = ["node_executions", "token"] diff --git a/flyteidl/gen/pb_python/flyteidl/core/tasks_pb2.py b/flyteidl/gen/pb_python/flyteidl/core/tasks_pb2.py index 6add4552b9..43beeeeca2 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/tasks_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/core/tasks_pb2.py @@ -19,7 +19,7 @@ from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19\x66lyteidl/core/tasks.proto\x12\rflyteidl.core\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1d\x66lyteidl/core/interface.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xd0\x02\n\tResources\x12\x42\n\x08requests\x18\x01 \x03(\x0b\x32&.flyteidl.core.Resources.ResourceEntryR\x08requests\x12>\n\x06limits\x18\x02 \x03(\x0b\x32&.flyteidl.core.Resources.ResourceEntryR\x06limits\x1a`\n\rResourceEntry\x12\x39\n\x04name\x18\x01 \x01(\x0e\x32%.flyteidl.core.Resources.ResourceNameR\x04name\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value\"]\n\x0cResourceName\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x43PU\x10\x01\x12\x07\n\x03GPU\x10\x02\x12\n\n\x06MEMORY\x10\x03\x12\x0b\n\x07STORAGE\x10\x04\x12\x15\n\x11\x45PHEMERAL_STORAGE\x10\x05\"\x91\x01\n\x0eGPUAccelerator\x12\x16\n\x06\x64\x65vice\x18\x01 \x01(\tR\x06\x64\x65vice\x12&\n\runpartitioned\x18\x02 \x01(\x08H\x00R\runpartitioned\x12\'\n\x0epartition_size\x18\x03 \x01(\tH\x00R\rpartitionSizeB\x16\n\x14partition_size_value\"[\n\x11\x45xtendedResources\x12\x46\n\x0fgpu_accelerator\x18\x01 \x01(\x0b\x32\x1d.flyteidl.core.GPUAcceleratorR\x0egpuAccelerator\"\xac\x01\n\x0fRuntimeMetadata\x12>\n\x04type\x18\x01 \x01(\x0e\x32*.flyteidl.core.RuntimeMetadata.RuntimeTypeR\x04type\x12\x18\n\x07version\x18\x02 \x01(\tR\x07version\x12\x16\n\x06\x66lavor\x18\x03 \x01(\tR\x06\x66lavor\"\'\n\x0bRuntimeType\x12\t\n\x05OTHER\x10\x00\x12\r\n\tFLYTE_SDK\x10\x01\"\xac\x05\n\x0cTaskMetadata\x12\"\n\x0c\x64iscoverable\x18\x01 \x01(\x08R\x0c\x64iscoverable\x12\x38\n\x07runtime\x18\x02 
\x01(\x0b\x32\x1e.flyteidl.core.RuntimeMetadataR\x07runtime\x12\x33\n\x07timeout\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x07timeout\x12\x36\n\x07retries\x18\x05 \x01(\x0b\x32\x1c.flyteidl.core.RetryStrategyR\x07retries\x12+\n\x11\x64iscovery_version\x18\x06 \x01(\tR\x10\x64iscoveryVersion\x12\x38\n\x18\x64\x65precated_error_message\x18\x07 \x01(\tR\x16\x64\x65precatedErrorMessage\x12&\n\rinterruptible\x18\x08 \x01(\x08H\x00R\rinterruptible\x12-\n\x12\x63\x61\x63he_serializable\x18\t \x01(\x08R\x11\x63\x61\x63heSerializable\x12%\n\x0egenerates_deck\x18\n \x01(\x08R\rgeneratesDeck\x12\x39\n\x04tags\x18\x0b \x03(\x0b\x32%.flyteidl.core.TaskMetadata.TagsEntryR\x04tags\x12*\n\x11pod_template_name\x18\x0c \x01(\tR\x0fpodTemplateName\x12\x35\n\x17\x63\x61\x63he_ignore_input_vars\x18\r \x03(\tR\x14\x63\x61\x63heIgnoreInputVars\x1a\x37\n\tTagsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\x15\n\x13interruptible_value\"\xd6\x05\n\x0cTaskTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12\x12\n\x04type\x18\x02 \x01(\tR\x04type\x12\x37\n\x08metadata\x18\x03 \x01(\x0b\x32\x1b.flyteidl.core.TaskMetadataR\x08metadata\x12;\n\tinterface\x18\x04 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12/\n\x06\x63ustom\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructR\x06\x63ustom\x12\x38\n\tcontainer\x18\x06 \x01(\x0b\x32\x18.flyteidl.core.ContainerH\x00R\tcontainer\x12\x30\n\x07k8s_pod\x18\x11 \x01(\x0b\x32\x15.flyteidl.core.K8sPodH\x00R\x06k8sPod\x12&\n\x03sql\x18\x12 \x01(\x0b\x32\x12.flyteidl.core.SqlH\x00R\x03sql\x12*\n\x11task_type_version\x18\x07 \x01(\x05R\x0ftaskTypeVersion\x12I\n\x10security_context\x18\x08 \x01(\x0b\x32\x1e.flyteidl.core.SecurityContextR\x0fsecurityContext\x12O\n\x12\x65xtended_resources\x18\t \x01(\x0b\x32 .flyteidl.core.ExtendedResourcesR\x11\x65xtendedResources\x12?\n\x06\x63onfig\x18\x10 \x03(\x0b\x32\'.flyteidl.core.TaskTemplate.ConfigEntryR\x06\x63onfig\x1a\x39\n\x0b\x43onfigEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\x08\n\x06target\"6\n\rContainerPort\x12%\n\x0e\x63ontainer_port\x18\x01 \x01(\rR\rcontainerPort\"\xfc\x03\n\tContainer\x12\x14\n\x05image\x18\x01 \x01(\tR\x05image\x12\x18\n\x07\x63ommand\x18\x02 \x03(\tR\x07\x63ommand\x12\x12\n\x04\x61rgs\x18\x03 \x03(\tR\x04\x61rgs\x12\x36\n\tresources\x18\x04 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12-\n\x03\x65nv\x18\x05 \x03(\x0b\x32\x1b.flyteidl.core.KeyValuePairR\x03\x65nv\x12\x37\n\x06\x63onfig\x18\x06 \x03(\x0b\x32\x1b.flyteidl.core.KeyValuePairB\x02\x18\x01R\x06\x63onfig\x12\x32\n\x05ports\x18\x07 \x03(\x0b\x32\x1c.flyteidl.core.ContainerPortR\x05ports\x12\x41\n\x0b\x64\x61ta_config\x18\t \x01(\x0b\x32 .flyteidl.core.DataLoadingConfigR\ndataConfig\x12I\n\x0c\x61rchitecture\x18\n \x01(\x0e\x32%.flyteidl.core.Container.ArchitectureR\x0c\x61rchitecture\"I\n\x0c\x41rchitecture\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05\x41MD64\x10\x01\x12\t\n\x05\x41RM64\x10\x02\x12\n\n\x06\x41RM_V6\x10\x03\x12\n\n\x06\x41RM_V7\x10\x04\"\xb5\x02\n\nIOStrategy\x12K\n\rdownload_mode\x18\x01 \x01(\x0e\x32&.flyteidl.core.IOStrategy.DownloadModeR\x0c\x64ownloadMode\x12\x45\n\x0bupload_mode\x18\x02 
\x01(\x0e\x32$.flyteidl.core.IOStrategy.UploadModeR\nuploadMode\"L\n\x0c\x44ownloadMode\x12\x12\n\x0e\x44OWNLOAD_EAGER\x10\x00\x12\x13\n\x0f\x44OWNLOAD_STREAM\x10\x01\x12\x13\n\x0f\x44O_NOT_DOWNLOAD\x10\x02\"E\n\nUploadMode\x12\x12\n\x0eUPLOAD_ON_EXIT\x10\x00\x12\x10\n\x0cUPLOAD_EAGER\x10\x01\x12\x11\n\rDO_NOT_UPLOAD\x10\x02\"\xa7\x02\n\x11\x44\x61taLoadingConfig\x12\x18\n\x07\x65nabled\x18\x01 \x01(\x08R\x07\x65nabled\x12\x1d\n\ninput_path\x18\x02 \x01(\tR\tinputPath\x12\x1f\n\x0boutput_path\x18\x03 \x01(\tR\noutputPath\x12I\n\x06\x66ormat\x18\x04 \x01(\x0e\x32\x31.flyteidl.core.DataLoadingConfig.LiteralMapFormatR\x06\x66ormat\x12:\n\x0bio_strategy\x18\x05 \x01(\x0b\x32\x19.flyteidl.core.IOStrategyR\nioStrategy\"1\n\x10LiteralMapFormat\x12\x08\n\x04JSON\x10\x00\x12\x08\n\x04YAML\x10\x01\x12\t\n\x05PROTO\x10\x02\"\xbd\x01\n\x06K8sPod\x12<\n\x08metadata\x18\x01 \x01(\x0b\x32 .flyteidl.core.K8sObjectMetadataR\x08metadata\x12\x32\n\x08pod_spec\x18\x02 \x01(\x0b\x32\x17.google.protobuf.StructR\x07podSpec\x12\x41\n\x0b\x64\x61ta_config\x18\x03 \x01(\x0b\x32 .flyteidl.core.DataLoadingConfigR\ndataConfig\"\xa9\x02\n\x11K8sObjectMetadata\x12\x44\n\x06labels\x18\x01 \x03(\x0b\x32,.flyteidl.core.K8sObjectMetadata.LabelsEntryR\x06labels\x12S\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32\x31.flyteidl.core.K8sObjectMetadata.AnnotationsEntryR\x0b\x61nnotations\x1a\x39\n\x0bLabelsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\x92\x01\n\x03Sql\x12\x1c\n\tstatement\x18\x01 \x01(\tR\tstatement\x12\x34\n\x07\x64ialect\x18\x02 \x01(\x0e\x32\x1a.flyteidl.core.Sql.DialectR\x07\x64ialect\"7\n\x07\x44ialect\x12\r\n\tUNDEFINED\x10\x00\x12\x08\n\x04\x41NSI\x10\x01\x12\x08\n\x04HIVE\x10\x02\x12\t\n\x05OTHER\x10\x03\x42\xb0\x01\n\x11\x63om.flyteidl.coreB\nTasksProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19\x66lyteidl/core/tasks.proto\x12\rflyteidl.core\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1d\x66lyteidl/core/interface.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xd0\x02\n\tResources\x12\x42\n\x08requests\x18\x01 \x03(\x0b\x32&.flyteidl.core.Resources.ResourceEntryR\x08requests\x12>\n\x06limits\x18\x02 \x03(\x0b\x32&.flyteidl.core.Resources.ResourceEntryR\x06limits\x1a`\n\rResourceEntry\x12\x39\n\x04name\x18\x01 \x01(\x0e\x32%.flyteidl.core.Resources.ResourceNameR\x04name\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value\"]\n\x0cResourceName\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x43PU\x10\x01\x12\x07\n\x03GPU\x10\x02\x12\n\n\x06MEMORY\x10\x03\x12\x0b\n\x07STORAGE\x10\x04\x12\x15\n\x11\x45PHEMERAL_STORAGE\x10\x05\"\x91\x01\n\x0eGPUAccelerator\x12\x16\n\x06\x64\x65vice\x18\x01 \x01(\tR\x06\x64\x65vice\x12&\n\runpartitioned\x18\x02 \x01(\x08H\x00R\runpartitioned\x12\'\n\x0epartition_size\x18\x03 \x01(\tH\x00R\rpartitionSizeB\x16\n\x14partition_size_value\"[\n\x11\x45xtendedResources\x12\x46\n\x0fgpu_accelerator\x18\x01 \x01(\x0b\x32\x1d.flyteidl.core.GPUAcceleratorR\x0egpuAccelerator\"\xac\x01\n\x0fRuntimeMetadata\x12>\n\x04type\x18\x01 
\x01(\x0e\x32*.flyteidl.core.RuntimeMetadata.RuntimeTypeR\x04type\x12\x18\n\x07version\x18\x02 \x01(\tR\x07version\x12\x16\n\x06\x66lavor\x18\x03 \x01(\tR\x06\x66lavor\"\'\n\x0bRuntimeType\x12\t\n\x05OTHER\x10\x00\x12\r\n\tFLYTE_SDK\x10\x01\"\xc7\x05\n\x0cTaskMetadata\x12\"\n\x0c\x64iscoverable\x18\x01 \x01(\x08R\x0c\x64iscoverable\x12\x38\n\x07runtime\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.RuntimeMetadataR\x07runtime\x12\x33\n\x07timeout\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x07timeout\x12\x36\n\x07retries\x18\x05 \x01(\x0b\x32\x1c.flyteidl.core.RetryStrategyR\x07retries\x12+\n\x11\x64iscovery_version\x18\x06 \x01(\tR\x10\x64iscoveryVersion\x12\x38\n\x18\x64\x65precated_error_message\x18\x07 \x01(\tR\x16\x64\x65precatedErrorMessage\x12&\n\rinterruptible\x18\x08 \x01(\x08H\x00R\rinterruptible\x12-\n\x12\x63\x61\x63he_serializable\x18\t \x01(\x08R\x11\x63\x61\x63heSerializable\x12%\n\x0egenerates_deck\x18\n \x01(\x08R\rgeneratesDeck\x12\x39\n\x04tags\x18\x0b \x03(\x0b\x32%.flyteidl.core.TaskMetadata.TagsEntryR\x04tags\x12*\n\x11pod_template_name\x18\x0c \x01(\tR\x0fpodTemplateName\x12\x35\n\x17\x63\x61\x63he_ignore_input_vars\x18\r \x03(\tR\x14\x63\x61\x63heIgnoreInputVars\x12\x19\n\x08is_eager\x18\x0e \x01(\x08R\x07isEager\x1a\x37\n\tTagsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\x15\n\x13interruptible_value\"\xd6\x05\n\x0cTaskTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12\x12\n\x04type\x18\x02 \x01(\tR\x04type\x12\x37\n\x08metadata\x18\x03 \x01(\x0b\x32\x1b.flyteidl.core.TaskMetadataR\x08metadata\x12;\n\tinterface\x18\x04 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12/\n\x06\x63ustom\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructR\x06\x63ustom\x12\x38\n\tcontainer\x18\x06 \x01(\x0b\x32\x18.flyteidl.core.ContainerH\x00R\tcontainer\x12\x30\n\x07k8s_pod\x18\x11 \x01(\x0b\x32\x15.flyteidl.core.K8sPodH\x00R\x06k8sPod\x12&\n\x03sql\x18\x12 \x01(\x0b\x32\x12.flyteidl.core.SqlH\x00R\x03sql\x12*\n\x11task_type_version\x18\x07 \x01(\x05R\x0ftaskTypeVersion\x12I\n\x10security_context\x18\x08 \x01(\x0b\x32\x1e.flyteidl.core.SecurityContextR\x0fsecurityContext\x12O\n\x12\x65xtended_resources\x18\t \x01(\x0b\x32 .flyteidl.core.ExtendedResourcesR\x11\x65xtendedResources\x12?\n\x06\x63onfig\x18\x10 \x03(\x0b\x32\'.flyteidl.core.TaskTemplate.ConfigEntryR\x06\x63onfig\x1a\x39\n\x0b\x43onfigEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\x08\n\x06target\"6\n\rContainerPort\x12%\n\x0e\x63ontainer_port\x18\x01 \x01(\rR\rcontainerPort\"\xfc\x03\n\tContainer\x12\x14\n\x05image\x18\x01 \x01(\tR\x05image\x12\x18\n\x07\x63ommand\x18\x02 \x03(\tR\x07\x63ommand\x12\x12\n\x04\x61rgs\x18\x03 \x03(\tR\x04\x61rgs\x12\x36\n\tresources\x18\x04 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12-\n\x03\x65nv\x18\x05 \x03(\x0b\x32\x1b.flyteidl.core.KeyValuePairR\x03\x65nv\x12\x37\n\x06\x63onfig\x18\x06 \x03(\x0b\x32\x1b.flyteidl.core.KeyValuePairB\x02\x18\x01R\x06\x63onfig\x12\x32\n\x05ports\x18\x07 \x03(\x0b\x32\x1c.flyteidl.core.ContainerPortR\x05ports\x12\x41\n\x0b\x64\x61ta_config\x18\t \x01(\x0b\x32 .flyteidl.core.DataLoadingConfigR\ndataConfig\x12I\n\x0c\x61rchitecture\x18\n 
\x01(\x0e\x32%.flyteidl.core.Container.ArchitectureR\x0c\x61rchitecture\"I\n\x0c\x41rchitecture\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05\x41MD64\x10\x01\x12\t\n\x05\x41RM64\x10\x02\x12\n\n\x06\x41RM_V6\x10\x03\x12\n\n\x06\x41RM_V7\x10\x04\"\xb5\x02\n\nIOStrategy\x12K\n\rdownload_mode\x18\x01 \x01(\x0e\x32&.flyteidl.core.IOStrategy.DownloadModeR\x0c\x64ownloadMode\x12\x45\n\x0bupload_mode\x18\x02 \x01(\x0e\x32$.flyteidl.core.IOStrategy.UploadModeR\nuploadMode\"L\n\x0c\x44ownloadMode\x12\x12\n\x0e\x44OWNLOAD_EAGER\x10\x00\x12\x13\n\x0f\x44OWNLOAD_STREAM\x10\x01\x12\x13\n\x0f\x44O_NOT_DOWNLOAD\x10\x02\"E\n\nUploadMode\x12\x12\n\x0eUPLOAD_ON_EXIT\x10\x00\x12\x10\n\x0cUPLOAD_EAGER\x10\x01\x12\x11\n\rDO_NOT_UPLOAD\x10\x02\"\xa7\x02\n\x11\x44\x61taLoadingConfig\x12\x18\n\x07\x65nabled\x18\x01 \x01(\x08R\x07\x65nabled\x12\x1d\n\ninput_path\x18\x02 \x01(\tR\tinputPath\x12\x1f\n\x0boutput_path\x18\x03 \x01(\tR\noutputPath\x12I\n\x06\x66ormat\x18\x04 \x01(\x0e\x32\x31.flyteidl.core.DataLoadingConfig.LiteralMapFormatR\x06\x66ormat\x12:\n\x0bio_strategy\x18\x05 \x01(\x0b\x32\x19.flyteidl.core.IOStrategyR\nioStrategy\"1\n\x10LiteralMapFormat\x12\x08\n\x04JSON\x10\x00\x12\x08\n\x04YAML\x10\x01\x12\t\n\x05PROTO\x10\x02\"\xbd\x01\n\x06K8sPod\x12<\n\x08metadata\x18\x01 \x01(\x0b\x32 .flyteidl.core.K8sObjectMetadataR\x08metadata\x12\x32\n\x08pod_spec\x18\x02 \x01(\x0b\x32\x17.google.protobuf.StructR\x07podSpec\x12\x41\n\x0b\x64\x61ta_config\x18\x03 \x01(\x0b\x32 .flyteidl.core.DataLoadingConfigR\ndataConfig\"\xa9\x02\n\x11K8sObjectMetadata\x12\x44\n\x06labels\x18\x01 \x03(\x0b\x32,.flyteidl.core.K8sObjectMetadata.LabelsEntryR\x06labels\x12S\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32\x31.flyteidl.core.K8sObjectMetadata.AnnotationsEntryR\x0b\x61nnotations\x1a\x39\n\x0bLabelsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\x92\x01\n\x03Sql\x12\x1c\n\tstatement\x18\x01 \x01(\tR\tstatement\x12\x34\n\x07\x64ialect\x18\x02 \x01(\x0e\x32\x1a.flyteidl.core.Sql.DialectR\x07\x64ialect\"7\n\x07\x44ialect\x12\r\n\tUNDEFINED\x10\x00\x12\x08\n\x04\x41NSI\x10\x01\x12\x08\n\x04HIVE\x10\x02\x12\t\n\x05OTHER\x10\x03\x42\xb0\x01\n\x11\x63om.flyteidl.coreB\nTasksProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -53,39 +53,39 @@ _globals['_RUNTIMEMETADATA_RUNTIMETYPE']._serialized_start=943 _globals['_RUNTIMEMETADATA_RUNTIMETYPE']._serialized_end=982 _globals['_TASKMETADATA']._serialized_start=985 - _globals['_TASKMETADATA']._serialized_end=1669 - _globals['_TASKMETADATA_TAGSENTRY']._serialized_start=1591 - _globals['_TASKMETADATA_TAGSENTRY']._serialized_end=1646 - _globals['_TASKTEMPLATE']._serialized_start=1672 - _globals['_TASKTEMPLATE']._serialized_end=2398 - _globals['_TASKTEMPLATE_CONFIGENTRY']._serialized_start=2331 - _globals['_TASKTEMPLATE_CONFIGENTRY']._serialized_end=2388 - _globals['_CONTAINERPORT']._serialized_start=2400 - _globals['_CONTAINERPORT']._serialized_end=2454 - _globals['_CONTAINER']._serialized_start=2457 - _globals['_CONTAINER']._serialized_end=2965 - _globals['_CONTAINER_ARCHITECTURE']._serialized_start=2892 - _globals['_CONTAINER_ARCHITECTURE']._serialized_end=2965 
- _globals['_IOSTRATEGY']._serialized_start=2968 - _globals['_IOSTRATEGY']._serialized_end=3277 - _globals['_IOSTRATEGY_DOWNLOADMODE']._serialized_start=3130 - _globals['_IOSTRATEGY_DOWNLOADMODE']._serialized_end=3206 - _globals['_IOSTRATEGY_UPLOADMODE']._serialized_start=3208 - _globals['_IOSTRATEGY_UPLOADMODE']._serialized_end=3277 - _globals['_DATALOADINGCONFIG']._serialized_start=3280 - _globals['_DATALOADINGCONFIG']._serialized_end=3575 - _globals['_DATALOADINGCONFIG_LITERALMAPFORMAT']._serialized_start=3526 - _globals['_DATALOADINGCONFIG_LITERALMAPFORMAT']._serialized_end=3575 - _globals['_K8SPOD']._serialized_start=3578 - _globals['_K8SPOD']._serialized_end=3767 - _globals['_K8SOBJECTMETADATA']._serialized_start=3770 - _globals['_K8SOBJECTMETADATA']._serialized_end=4067 - _globals['_K8SOBJECTMETADATA_LABELSENTRY']._serialized_start=3946 - _globals['_K8SOBJECTMETADATA_LABELSENTRY']._serialized_end=4003 - _globals['_K8SOBJECTMETADATA_ANNOTATIONSENTRY']._serialized_start=4005 - _globals['_K8SOBJECTMETADATA_ANNOTATIONSENTRY']._serialized_end=4067 - _globals['_SQL']._serialized_start=4070 - _globals['_SQL']._serialized_end=4216 - _globals['_SQL_DIALECT']._serialized_start=4161 - _globals['_SQL_DIALECT']._serialized_end=4216 + _globals['_TASKMETADATA']._serialized_end=1696 + _globals['_TASKMETADATA_TAGSENTRY']._serialized_start=1618 + _globals['_TASKMETADATA_TAGSENTRY']._serialized_end=1673 + _globals['_TASKTEMPLATE']._serialized_start=1699 + _globals['_TASKTEMPLATE']._serialized_end=2425 + _globals['_TASKTEMPLATE_CONFIGENTRY']._serialized_start=2358 + _globals['_TASKTEMPLATE_CONFIGENTRY']._serialized_end=2415 + _globals['_CONTAINERPORT']._serialized_start=2427 + _globals['_CONTAINERPORT']._serialized_end=2481 + _globals['_CONTAINER']._serialized_start=2484 + _globals['_CONTAINER']._serialized_end=2992 + _globals['_CONTAINER_ARCHITECTURE']._serialized_start=2919 + _globals['_CONTAINER_ARCHITECTURE']._serialized_end=2992 + _globals['_IOSTRATEGY']._serialized_start=2995 + _globals['_IOSTRATEGY']._serialized_end=3304 + _globals['_IOSTRATEGY_DOWNLOADMODE']._serialized_start=3157 + _globals['_IOSTRATEGY_DOWNLOADMODE']._serialized_end=3233 + _globals['_IOSTRATEGY_UPLOADMODE']._serialized_start=3235 + _globals['_IOSTRATEGY_UPLOADMODE']._serialized_end=3304 + _globals['_DATALOADINGCONFIG']._serialized_start=3307 + _globals['_DATALOADINGCONFIG']._serialized_end=3602 + _globals['_DATALOADINGCONFIG_LITERALMAPFORMAT']._serialized_start=3553 + _globals['_DATALOADINGCONFIG_LITERALMAPFORMAT']._serialized_end=3602 + _globals['_K8SPOD']._serialized_start=3605 + _globals['_K8SPOD']._serialized_end=3794 + _globals['_K8SOBJECTMETADATA']._serialized_start=3797 + _globals['_K8SOBJECTMETADATA']._serialized_end=4094 + _globals['_K8SOBJECTMETADATA_LABELSENTRY']._serialized_start=3973 + _globals['_K8SOBJECTMETADATA_LABELSENTRY']._serialized_end=4030 + _globals['_K8SOBJECTMETADATA_ANNOTATIONSENTRY']._serialized_start=4032 + _globals['_K8SOBJECTMETADATA_ANNOTATIONSENTRY']._serialized_end=4094 + _globals['_SQL']._serialized_start=4097 + _globals['_SQL']._serialized_end=4243 + _globals['_SQL_DIALECT']._serialized_start=4188 + _globals['_SQL_DIALECT']._serialized_end=4243 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/core/tasks_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/core/tasks_pb2.pyi index 98d1792aee..9e79c295ec 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/tasks_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/core/tasks_pb2.pyi @@ -74,7 +74,7 @@ class 
RuntimeMetadata(_message.Message): def __init__(self, type: _Optional[_Union[RuntimeMetadata.RuntimeType, str]] = ..., version: _Optional[str] = ..., flavor: _Optional[str] = ...) -> None: ... class TaskMetadata(_message.Message): - __slots__ = ["discoverable", "runtime", "timeout", "retries", "discovery_version", "deprecated_error_message", "interruptible", "cache_serializable", "generates_deck", "tags", "pod_template_name", "cache_ignore_input_vars"] + __slots__ = ["discoverable", "runtime", "timeout", "retries", "discovery_version", "deprecated_error_message", "interruptible", "cache_serializable", "generates_deck", "tags", "pod_template_name", "cache_ignore_input_vars", "is_eager"] class TagsEntry(_message.Message): __slots__ = ["key", "value"] KEY_FIELD_NUMBER: _ClassVar[int] @@ -94,6 +94,7 @@ class TaskMetadata(_message.Message): TAGS_FIELD_NUMBER: _ClassVar[int] POD_TEMPLATE_NAME_FIELD_NUMBER: _ClassVar[int] CACHE_IGNORE_INPUT_VARS_FIELD_NUMBER: _ClassVar[int] + IS_EAGER_FIELD_NUMBER: _ClassVar[int] discoverable: bool runtime: RuntimeMetadata timeout: _duration_pb2.Duration @@ -106,7 +107,8 @@ class TaskMetadata(_message.Message): tags: _containers.ScalarMap[str, str] pod_template_name: str cache_ignore_input_vars: _containers.RepeatedScalarFieldContainer[str] - def __init__(self, discoverable: bool = ..., runtime: _Optional[_Union[RuntimeMetadata, _Mapping]] = ..., timeout: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., retries: _Optional[_Union[_literals_pb2.RetryStrategy, _Mapping]] = ..., discovery_version: _Optional[str] = ..., deprecated_error_message: _Optional[str] = ..., interruptible: bool = ..., cache_serializable: bool = ..., generates_deck: bool = ..., tags: _Optional[_Mapping[str, str]] = ..., pod_template_name: _Optional[str] = ..., cache_ignore_input_vars: _Optional[_Iterable[str]] = ...) -> None: ... + is_eager: bool + def __init__(self, discoverable: bool = ..., runtime: _Optional[_Union[RuntimeMetadata, _Mapping]] = ..., timeout: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., retries: _Optional[_Union[_literals_pb2.RetryStrategy, _Mapping]] = ..., discovery_version: _Optional[str] = ..., deprecated_error_message: _Optional[str] = ..., interruptible: bool = ..., cache_serializable: bool = ..., generates_deck: bool = ..., tags: _Optional[_Mapping[str, str]] = ..., pod_template_name: _Optional[str] = ..., cache_ignore_input_vars: _Optional[_Iterable[str]] = ..., is_eager: bool = ...) -> None: ... 
class TaskTemplate(_message.Message): __slots__ = ["id", "type", "metadata", "interface", "custom", "container", "k8s_pod", "sql", "task_type_version", "security_context", "extended_resources", "config"] diff --git a/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.py b/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.py index 0c62aca3ad..9f5b5f0881 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.py @@ -23,7 +23,7 @@ from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lyteidl/core/workflow.proto\x12\rflyteidl.core\x1a\x1d\x66lyteidl/core/condition.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1d\x66lyteidl/core/interface.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x19\x66lyteidl/core/tasks.proto\x1a\x19\x66lyteidl/core/types.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\"{\n\x07IfBlock\x12>\n\tcondition\x18\x01 \x01(\x0b\x32 .flyteidl.core.BooleanExpressionR\tcondition\x12\x30\n\tthen_node\x18\x02 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x08thenNode\"\xd4\x01\n\x0bIfElseBlock\x12*\n\x04\x63\x61se\x18\x01 \x01(\x0b\x32\x16.flyteidl.core.IfBlockR\x04\x63\x61se\x12,\n\x05other\x18\x02 \x03(\x0b\x32\x16.flyteidl.core.IfBlockR\x05other\x12\x32\n\telse_node\x18\x03 \x01(\x0b\x32\x13.flyteidl.core.NodeH\x00R\x08\x65lseNode\x12,\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x14.flyteidl.core.ErrorH\x00R\x05\x65rrorB\t\n\x07\x64\x65\x66\x61ult\"A\n\nBranchNode\x12\x33\n\x07if_else\x18\x01 \x01(\x0b\x32\x1a.flyteidl.core.IfElseBlockR\x06ifElse\"\x97\x01\n\x08TaskNode\x12>\n\x0creference_id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\x0breferenceId\x12>\n\toverrides\x18\x02 \x01(\x0b\x32 .flyteidl.core.TaskNodeOverridesR\toverridesB\x0b\n\treference\"\xa6\x01\n\x0cWorkflowNode\x12\x42\n\x0elaunchplan_ref\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\rlaunchplanRef\x12\x45\n\x10sub_workflow_ref\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\x0esubWorkflowRefB\x0b\n\treference\"/\n\x10\x41pproveCondition\x12\x1b\n\tsignal_id\x18\x01 \x01(\tR\x08signalId\"\x90\x01\n\x0fSignalCondition\x12\x1b\n\tsignal_id\x18\x01 \x01(\tR\x08signalId\x12.\n\x04type\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\x04type\x12\x30\n\x14output_variable_name\x18\x03 \x01(\tR\x12outputVariableName\"G\n\x0eSleepCondition\x12\x35\n\x08\x64uration\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x08\x64uration\"\xc5\x01\n\x08GateNode\x12;\n\x07\x61pprove\x18\x01 \x01(\x0b\x32\x1f.flyteidl.core.ApproveConditionH\x00R\x07\x61pprove\x12\x38\n\x06signal\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.SignalConditionH\x00R\x06signal\x12\x35\n\x05sleep\x18\x03 \x01(\x0b\x32\x1d.flyteidl.core.SleepConditionH\x00R\x05sleepB\x0b\n\tcondition\"\xda\x02\n\tArrayNode\x12\'\n\x04node\x18\x01 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x04node\x12\"\n\x0bparallelism\x18\x02 \x01(\rH\x00R\x0bparallelism\x12%\n\rmin_successes\x18\x03 \x01(\rH\x01R\x0cminSuccesses\x12,\n\x11min_success_ratio\x18\x04 \x01(\x02H\x01R\x0fminSuccessRatio\x12M\n\x0e\x65xecution_mode\x18\x05 \x01(\x0e\x32&.flyteidl.core.ArrayNode.ExecutionModeR\rexecutionMode\"2\n\rExecutionMode\x12\x11\n\rMINIMAL_STATE\x10\x00\x12\x0e\n\nFULL_STATE\x10\x01\x42\x14\n\x12parallelism_optionB\x12\n\x10success_criteria\"\x8c\x03\n\x0cNodeMetadata\x12\x12\n\x04name\x18\x01 
\x01(\tR\x04name\x12\x33\n\x07timeout\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x07timeout\x12\x36\n\x07retries\x18\x05 \x01(\x0b\x32\x1c.flyteidl.core.RetryStrategyR\x07retries\x12&\n\rinterruptible\x18\x06 \x01(\x08H\x00R\rinterruptible\x12\x1e\n\tcacheable\x18\x07 \x01(\x08H\x01R\tcacheable\x12%\n\rcache_version\x18\x08 \x01(\tH\x02R\x0c\x63\x61\x63heVersion\x12/\n\x12\x63\x61\x63he_serializable\x18\t \x01(\x08H\x03R\x11\x63\x61\x63heSerializableB\x15\n\x13interruptible_valueB\x11\n\x0f\x63\x61\x63heable_valueB\x15\n\x13\x63\x61\x63he_version_valueB\x1a\n\x18\x63\x61\x63he_serializable_value\"/\n\x05\x41lias\x12\x10\n\x03var\x18\x01 \x01(\tR\x03var\x12\x14\n\x05\x61lias\x18\x02 \x01(\tR\x05\x61lias\"\x9f\x04\n\x04Node\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x37\n\x08metadata\x18\x02 \x01(\x0b\x32\x1b.flyteidl.core.NodeMetadataR\x08metadata\x12.\n\x06inputs\x18\x03 \x03(\x0b\x32\x16.flyteidl.core.BindingR\x06inputs\x12*\n\x11upstream_node_ids\x18\x04 \x03(\tR\x0fupstreamNodeIds\x12;\n\x0eoutput_aliases\x18\x05 \x03(\x0b\x32\x14.flyteidl.core.AliasR\routputAliases\x12\x36\n\ttask_node\x18\x06 \x01(\x0b\x32\x17.flyteidl.core.TaskNodeH\x00R\x08taskNode\x12\x42\n\rworkflow_node\x18\x07 \x01(\x0b\x32\x1b.flyteidl.core.WorkflowNodeH\x00R\x0cworkflowNode\x12<\n\x0b\x62ranch_node\x18\x08 \x01(\x0b\x32\x19.flyteidl.core.BranchNodeH\x00R\nbranchNode\x12\x36\n\tgate_node\x18\t \x01(\x0b\x32\x17.flyteidl.core.GateNodeH\x00R\x08gateNode\x12\x39\n\narray_node\x18\n \x01(\x0b\x32\x18.flyteidl.core.ArrayNodeH\x00R\tarrayNodeB\x08\n\x06target\"\xfc\x02\n\x10WorkflowMetadata\x12M\n\x12quality_of_service\x18\x01 \x01(\x0b\x32\x1f.flyteidl.core.QualityOfServiceR\x10qualityOfService\x12N\n\non_failure\x18\x02 \x01(\x0e\x32/.flyteidl.core.WorkflowMetadata.OnFailurePolicyR\tonFailure\x12=\n\x04tags\x18\x03 \x03(\x0b\x32).flyteidl.core.WorkflowMetadata.TagsEntryR\x04tags\x1a\x37\n\tTagsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"Q\n\x0fOnFailurePolicy\x12\x14\n\x10\x46\x41IL_IMMEDIATELY\x10\x00\x12(\n$FAIL_AFTER_EXECUTABLE_NODES_COMPLETE\x10\x01\"@\n\x18WorkflowMetadataDefaults\x12$\n\rinterruptible\x18\x01 \x01(\x08R\rinterruptible\"\xa2\x03\n\x10WorkflowTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12;\n\x08metadata\x18\x02 \x01(\x0b\x32\x1f.flyteidl.core.WorkflowMetadataR\x08metadata\x12;\n\tinterface\x18\x03 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12)\n\x05nodes\x18\x04 \x03(\x0b\x32\x13.flyteidl.core.NodeR\x05nodes\x12\x30\n\x07outputs\x18\x05 \x03(\x0b\x32\x16.flyteidl.core.BindingR\x07outputs\x12\x36\n\x0c\x66\x61ilure_node\x18\x06 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x0b\x66\x61ilureNode\x12T\n\x11metadata_defaults\x18\x07 \x01(\x0b\x32\'.flyteidl.core.WorkflowMetadataDefaultsR\x10metadataDefaults\"\xc5\x01\n\x11TaskNodeOverrides\x12\x36\n\tresources\x18\x01 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12O\n\x12\x65xtended_resources\x18\x02 \x01(\x0b\x32 .flyteidl.core.ExtendedResourcesR\x11\x65xtendedResources\x12\'\n\x0f\x63ontainer_image\x18\x03 \x01(\tR\x0e\x63ontainerImage\"\xba\x01\n\x12LaunchPlanTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12;\n\tinterface\x18\x02 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12<\n\x0c\x66ixed_inputs\x18\x03 
\x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x0b\x66ixedInputsB\xb3\x01\n\x11\x63om.flyteidl.coreB\rWorkflowProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lyteidl/core/workflow.proto\x12\rflyteidl.core\x1a\x1d\x66lyteidl/core/condition.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1d\x66lyteidl/core/interface.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x19\x66lyteidl/core/tasks.proto\x1a\x19\x66lyteidl/core/types.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\"{\n\x07IfBlock\x12>\n\tcondition\x18\x01 \x01(\x0b\x32 .flyteidl.core.BooleanExpressionR\tcondition\x12\x30\n\tthen_node\x18\x02 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x08thenNode\"\xd4\x01\n\x0bIfElseBlock\x12*\n\x04\x63\x61se\x18\x01 \x01(\x0b\x32\x16.flyteidl.core.IfBlockR\x04\x63\x61se\x12,\n\x05other\x18\x02 \x03(\x0b\x32\x16.flyteidl.core.IfBlockR\x05other\x12\x32\n\telse_node\x18\x03 \x01(\x0b\x32\x13.flyteidl.core.NodeH\x00R\x08\x65lseNode\x12,\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x14.flyteidl.core.ErrorH\x00R\x05\x65rrorB\t\n\x07\x64\x65\x66\x61ult\"A\n\nBranchNode\x12\x33\n\x07if_else\x18\x01 \x01(\x0b\x32\x1a.flyteidl.core.IfElseBlockR\x06ifElse\"\x97\x01\n\x08TaskNode\x12>\n\x0creference_id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\x0breferenceId\x12>\n\toverrides\x18\x02 \x01(\x0b\x32 .flyteidl.core.TaskNodeOverridesR\toverridesB\x0b\n\treference\"\xa6\x01\n\x0cWorkflowNode\x12\x42\n\x0elaunchplan_ref\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\rlaunchplanRef\x12\x45\n\x10sub_workflow_ref\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\x0esubWorkflowRefB\x0b\n\treference\"/\n\x10\x41pproveCondition\x12\x1b\n\tsignal_id\x18\x01 \x01(\tR\x08signalId\"\x90\x01\n\x0fSignalCondition\x12\x1b\n\tsignal_id\x18\x01 \x01(\tR\x08signalId\x12.\n\x04type\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\x04type\x12\x30\n\x14output_variable_name\x18\x03 \x01(\tR\x12outputVariableName\"G\n\x0eSleepCondition\x12\x35\n\x08\x64uration\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x08\x64uration\"\xc5\x01\n\x08GateNode\x12;\n\x07\x61pprove\x18\x01 \x01(\x0b\x32\x1f.flyteidl.core.ApproveConditionH\x00R\x07\x61pprove\x12\x38\n\x06signal\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.SignalConditionH\x00R\x06signal\x12\x35\n\x05sleep\x18\x03 \x01(\x0b\x32\x1d.flyteidl.core.SleepConditionH\x00R\x05sleepB\x0b\n\tcondition\"\xba\x03\n\tArrayNode\x12\'\n\x04node\x18\x01 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x04node\x12\"\n\x0bparallelism\x18\x02 \x01(\rH\x00R\x0bparallelism\x12%\n\rmin_successes\x18\x03 \x01(\rH\x01R\x0cminSuccesses\x12,\n\x11min_success_ratio\x18\x04 \x01(\x02H\x01R\x0fminSuccessRatio\x12M\n\x0e\x65xecution_mode\x18\x05 \x01(\x0e\x32&.flyteidl.core.ArrayNode.ExecutionModeR\rexecutionMode\x12^\n\x1eis_original_sub_node_interface\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.BoolValueR\x1aisOriginalSubNodeInterface\"2\n\rExecutionMode\x12\x11\n\rMINIMAL_STATE\x10\x00\x12\x0e\n\nFULL_STATE\x10\x01\x42\x14\n\x12parallelism_optionB\x12\n\x10success_criteria\"\x8c\x03\n\x0cNodeMetadata\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x33\n\x07timeout\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x07timeout\x12\x36\n\x07retries\x18\x05 
\x01(\x0b\x32\x1c.flyteidl.core.RetryStrategyR\x07retries\x12&\n\rinterruptible\x18\x06 \x01(\x08H\x00R\rinterruptible\x12\x1e\n\tcacheable\x18\x07 \x01(\x08H\x01R\tcacheable\x12%\n\rcache_version\x18\x08 \x01(\tH\x02R\x0c\x63\x61\x63heVersion\x12/\n\x12\x63\x61\x63he_serializable\x18\t \x01(\x08H\x03R\x11\x63\x61\x63heSerializableB\x15\n\x13interruptible_valueB\x11\n\x0f\x63\x61\x63heable_valueB\x15\n\x13\x63\x61\x63he_version_valueB\x1a\n\x18\x63\x61\x63he_serializable_value\"/\n\x05\x41lias\x12\x10\n\x03var\x18\x01 \x01(\tR\x03var\x12\x14\n\x05\x61lias\x18\x02 \x01(\tR\x05\x61lias\"\x9f\x04\n\x04Node\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x37\n\x08metadata\x18\x02 \x01(\x0b\x32\x1b.flyteidl.core.NodeMetadataR\x08metadata\x12.\n\x06inputs\x18\x03 \x03(\x0b\x32\x16.flyteidl.core.BindingR\x06inputs\x12*\n\x11upstream_node_ids\x18\x04 \x03(\tR\x0fupstreamNodeIds\x12;\n\x0eoutput_aliases\x18\x05 \x03(\x0b\x32\x14.flyteidl.core.AliasR\routputAliases\x12\x36\n\ttask_node\x18\x06 \x01(\x0b\x32\x17.flyteidl.core.TaskNodeH\x00R\x08taskNode\x12\x42\n\rworkflow_node\x18\x07 \x01(\x0b\x32\x1b.flyteidl.core.WorkflowNodeH\x00R\x0cworkflowNode\x12<\n\x0b\x62ranch_node\x18\x08 \x01(\x0b\x32\x19.flyteidl.core.BranchNodeH\x00R\nbranchNode\x12\x36\n\tgate_node\x18\t \x01(\x0b\x32\x17.flyteidl.core.GateNodeH\x00R\x08gateNode\x12\x39\n\narray_node\x18\n \x01(\x0b\x32\x18.flyteidl.core.ArrayNodeH\x00R\tarrayNodeB\x08\n\x06target\"\xfc\x02\n\x10WorkflowMetadata\x12M\n\x12quality_of_service\x18\x01 \x01(\x0b\x32\x1f.flyteidl.core.QualityOfServiceR\x10qualityOfService\x12N\n\non_failure\x18\x02 \x01(\x0e\x32/.flyteidl.core.WorkflowMetadata.OnFailurePolicyR\tonFailure\x12=\n\x04tags\x18\x03 \x03(\x0b\x32).flyteidl.core.WorkflowMetadata.TagsEntryR\x04tags\x1a\x37\n\tTagsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"Q\n\x0fOnFailurePolicy\x12\x14\n\x10\x46\x41IL_IMMEDIATELY\x10\x00\x12(\n$FAIL_AFTER_EXECUTABLE_NODES_COMPLETE\x10\x01\"@\n\x18WorkflowMetadataDefaults\x12$\n\rinterruptible\x18\x01 \x01(\x08R\rinterruptible\"\xa2\x03\n\x10WorkflowTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12;\n\x08metadata\x18\x02 \x01(\x0b\x32\x1f.flyteidl.core.WorkflowMetadataR\x08metadata\x12;\n\tinterface\x18\x03 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12)\n\x05nodes\x18\x04 \x03(\x0b\x32\x13.flyteidl.core.NodeR\x05nodes\x12\x30\n\x07outputs\x18\x05 \x03(\x0b\x32\x16.flyteidl.core.BindingR\x07outputs\x12\x36\n\x0c\x66\x61ilure_node\x18\x06 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x0b\x66\x61ilureNode\x12T\n\x11metadata_defaults\x18\x07 \x01(\x0b\x32\'.flyteidl.core.WorkflowMetadataDefaultsR\x10metadataDefaults\"\xc5\x01\n\x11TaskNodeOverrides\x12\x36\n\tresources\x18\x01 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12O\n\x12\x65xtended_resources\x18\x02 \x01(\x0b\x32 .flyteidl.core.ExtendedResourcesR\x11\x65xtendedResources\x12\'\n\x0f\x63ontainer_image\x18\x03 \x01(\tR\x0e\x63ontainerImage\"\xba\x01\n\x12LaunchPlanTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12;\n\tinterface\x18\x02 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12<\n\x0c\x66ixed_inputs\x18\x03 
\x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x0b\x66ixedInputsB\xb3\x01\n\x11\x63om.flyteidl.coreB\rWorkflowProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -53,27 +53,27 @@ _globals['_GATENODE']._serialized_start=1350 _globals['_GATENODE']._serialized_end=1547 _globals['_ARRAYNODE']._serialized_start=1550 - _globals['_ARRAYNODE']._serialized_end=1896 - _globals['_ARRAYNODE_EXECUTIONMODE']._serialized_start=1804 - _globals['_ARRAYNODE_EXECUTIONMODE']._serialized_end=1854 - _globals['_NODEMETADATA']._serialized_start=1899 - _globals['_NODEMETADATA']._serialized_end=2295 - _globals['_ALIAS']._serialized_start=2297 - _globals['_ALIAS']._serialized_end=2344 - _globals['_NODE']._serialized_start=2347 - _globals['_NODE']._serialized_end=2890 - _globals['_WORKFLOWMETADATA']._serialized_start=2893 - _globals['_WORKFLOWMETADATA']._serialized_end=3273 - _globals['_WORKFLOWMETADATA_TAGSENTRY']._serialized_start=3135 - _globals['_WORKFLOWMETADATA_TAGSENTRY']._serialized_end=3190 - _globals['_WORKFLOWMETADATA_ONFAILUREPOLICY']._serialized_start=3192 - _globals['_WORKFLOWMETADATA_ONFAILUREPOLICY']._serialized_end=3273 - _globals['_WORKFLOWMETADATADEFAULTS']._serialized_start=3275 - _globals['_WORKFLOWMETADATADEFAULTS']._serialized_end=3339 - _globals['_WORKFLOWTEMPLATE']._serialized_start=3342 - _globals['_WORKFLOWTEMPLATE']._serialized_end=3760 - _globals['_TASKNODEOVERRIDES']._serialized_start=3763 - _globals['_TASKNODEOVERRIDES']._serialized_end=3960 - _globals['_LAUNCHPLANTEMPLATE']._serialized_start=3963 - _globals['_LAUNCHPLANTEMPLATE']._serialized_end=4149 + _globals['_ARRAYNODE']._serialized_end=1992 + _globals['_ARRAYNODE_EXECUTIONMODE']._serialized_start=1900 + _globals['_ARRAYNODE_EXECUTIONMODE']._serialized_end=1950 + _globals['_NODEMETADATA']._serialized_start=1995 + _globals['_NODEMETADATA']._serialized_end=2391 + _globals['_ALIAS']._serialized_start=2393 + _globals['_ALIAS']._serialized_end=2440 + _globals['_NODE']._serialized_start=2443 + _globals['_NODE']._serialized_end=2986 + _globals['_WORKFLOWMETADATA']._serialized_start=2989 + _globals['_WORKFLOWMETADATA']._serialized_end=3369 + _globals['_WORKFLOWMETADATA_TAGSENTRY']._serialized_start=3231 + _globals['_WORKFLOWMETADATA_TAGSENTRY']._serialized_end=3286 + _globals['_WORKFLOWMETADATA_ONFAILUREPOLICY']._serialized_start=3288 + _globals['_WORKFLOWMETADATA_ONFAILUREPOLICY']._serialized_end=3369 + _globals['_WORKFLOWMETADATADEFAULTS']._serialized_start=3371 + _globals['_WORKFLOWMETADATADEFAULTS']._serialized_end=3435 + _globals['_WORKFLOWTEMPLATE']._serialized_start=3438 + _globals['_WORKFLOWTEMPLATE']._serialized_end=3856 + _globals['_TASKNODEOVERRIDES']._serialized_start=3859 + _globals['_TASKNODEOVERRIDES']._serialized_end=4056 + _globals['_LAUNCHPLANTEMPLATE']._serialized_start=4059 + _globals['_LAUNCHPLANTEMPLATE']._serialized_end=4245 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.pyi index 664581b0f4..6099ea6d6e 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.pyi @@ -91,7 +91,7 @@ class GateNode(_message.Message): def __init__(self, approve: _Optional[_Union[ApproveCondition, _Mapping]] = 
..., signal: _Optional[_Union[SignalCondition, _Mapping]] = ..., sleep: _Optional[_Union[SleepCondition, _Mapping]] = ...) -> None: ... class ArrayNode(_message.Message): - __slots__ = ["node", "parallelism", "min_successes", "min_success_ratio", "execution_mode"] + __slots__ = ["node", "parallelism", "min_successes", "min_success_ratio", "execution_mode", "is_original_sub_node_interface"] class ExecutionMode(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = [] MINIMAL_STATE: _ClassVar[ArrayNode.ExecutionMode] @@ -103,12 +103,14 @@ class ArrayNode(_message.Message): MIN_SUCCESSES_FIELD_NUMBER: _ClassVar[int] MIN_SUCCESS_RATIO_FIELD_NUMBER: _ClassVar[int] EXECUTION_MODE_FIELD_NUMBER: _ClassVar[int] + IS_ORIGINAL_SUB_NODE_INTERFACE_FIELD_NUMBER: _ClassVar[int] node: Node parallelism: int min_successes: int min_success_ratio: float execution_mode: ArrayNode.ExecutionMode - def __init__(self, node: _Optional[_Union[Node, _Mapping]] = ..., parallelism: _Optional[int] = ..., min_successes: _Optional[int] = ..., min_success_ratio: _Optional[float] = ..., execution_mode: _Optional[_Union[ArrayNode.ExecutionMode, str]] = ...) -> None: ... + is_original_sub_node_interface: _wrappers_pb2.BoolValue + def __init__(self, node: _Optional[_Union[Node, _Mapping]] = ..., parallelism: _Optional[int] = ..., min_successes: _Optional[int] = ..., min_success_ratio: _Optional[float] = ..., execution_mode: _Optional[_Union[ArrayNode.ExecutionMode, str]] = ..., is_original_sub_node_interface: _Optional[_Union[_wrappers_pb2.BoolValue, _Mapping]] = ...) -> None: ... class NodeMetadata(_message.Message): __slots__ = ["name", "timeout", "retries", "interruptible", "cacheable", "cache_version", "cache_serializable"] diff --git a/flyteidl/gen/pb_python/flyteidl/event/event_pb2.py b/flyteidl/gen/pb_python/flyteidl/event/event_pb2.py index 5974dfe477..7b9b4e8c2c 100644 --- a/flyteidl/gen/pb_python/flyteidl/event/event_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/event/event_pb2.py @@ -20,7 +20,7 @@ from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/event/event.proto\x12\x0e\x66lyteidl.event\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1c\x66lyteidl/core/compiler.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1b\x66lyteidl/core/catalog.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xaa\x03\n\x16WorkflowExecutionEvent\x12M\n\x0c\x65xecution_id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\x12\x1f\n\x0bproducer_id\x18\x02 \x01(\tR\nproducerId\x12<\n\x05phase\x18\x03 \x01(\x0e\x32&.flyteidl.core.WorkflowExecution.PhaseR\x05phase\x12;\n\x0boccurred_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1f\n\noutput_uri\x18\x05 \x01(\tH\x00R\toutputUri\x12\x35\n\x05\x65rror\x18\x06 \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x00R\x05\x65rror\x12<\n\x0boutput_data\x18\x07 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\noutputDataB\x0f\n\routput_result\"\x99\n\n\x12NodeExecutionEvent\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\x12\x1f\n\x0bproducer_id\x18\x02 \x01(\tR\nproducerId\x12\x38\n\x05phase\x18\x03 \x01(\x0e\x32\".flyteidl.core.NodeExecution.PhaseR\x05phase\x12;\n\x0boccurred_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1d\n\tinput_uri\x18\x05 \x01(\tH\x00R\x08inputUri\x12:\n\ninput_data\x18\x14 
\x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\tinputData\x12\x1f\n\noutput_uri\x18\x06 \x01(\tH\x01R\toutputUri\x12\x35\n\x05\x65rror\x18\x07 \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x01R\x05\x65rror\x12<\n\x0boutput_data\x18\x0f \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x01R\noutputData\x12\\\n\x16workflow_node_metadata\x18\x08 \x01(\x0b\x32$.flyteidl.event.WorkflowNodeMetadataH\x02R\x14workflowNodeMetadata\x12P\n\x12task_node_metadata\x18\x0e \x01(\x0b\x32 .flyteidl.event.TaskNodeMetadataH\x02R\x10taskNodeMetadata\x12]\n\x14parent_task_metadata\x18\t \x01(\x0b\x32+.flyteidl.event.ParentTaskExecutionMetadataR\x12parentTaskMetadata\x12]\n\x14parent_node_metadata\x18\n \x01(\x0b\x32+.flyteidl.event.ParentNodeExecutionMetadataR\x12parentNodeMetadata\x12\x1f\n\x0bretry_group\x18\x0b \x01(\tR\nretryGroup\x12 \n\x0cspec_node_id\x18\x0c \x01(\tR\nspecNodeId\x12\x1b\n\tnode_name\x18\r \x01(\tR\x08nodeName\x12#\n\revent_version\x18\x10 \x01(\x05R\x0c\x65ventVersion\x12\x1b\n\tis_parent\x18\x11 \x01(\x08R\x08isParent\x12\x1d\n\nis_dynamic\x18\x12 \x01(\x08R\tisDynamic\x12\x19\n\x08\x64\x65\x63k_uri\x18\x13 \x01(\tR\x07\x64\x65\x63kUri\x12;\n\x0breported_at\x18\x15 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\nreportedAt\x12\x19\n\x08is_array\x18\x16 \x01(\x08R\x07isArray\x12>\n\rtarget_entity\x18\x17 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x0ctargetEntity\x12-\n\x13is_in_dynamic_chain\x18\x18 \x01(\x08R\x10isInDynamicChainB\r\n\x0binput_valueB\x0f\n\routput_resultB\x11\n\x0ftarget_metadata\"e\n\x14WorkflowNodeMetadata\x12M\n\x0c\x65xecution_id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\"\xf1\x02\n\x10TaskNodeMetadata\x12\x44\n\x0c\x63\x61\x63he_status\x18\x01 \x01(\x0e\x32!.flyteidl.core.CatalogCacheStatusR\x0b\x63\x61\x63heStatus\x12?\n\x0b\x63\x61talog_key\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.CatalogMetadataR\ncatalogKey\x12W\n\x12reservation_status\x18\x03 \x01(\x0e\x32(.flyteidl.core.CatalogReservation.StatusR\x11reservationStatus\x12%\n\x0e\x63heckpoint_uri\x18\x04 \x01(\tR\rcheckpointUri\x12V\n\x10\x64ynamic_workflow\x18\x10 \x01(\x0b\x32+.flyteidl.event.DynamicWorkflowNodeMetadataR\x0f\x64ynamicWorkflow\"\xce\x01\n\x1b\x44ynamicWorkflowNodeMetadata\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12S\n\x11\x63ompiled_workflow\x18\x02 \x01(\x0b\x32&.flyteidl.core.CompiledWorkflowClosureR\x10\x63ompiledWorkflow\x12/\n\x14\x64ynamic_job_spec_uri\x18\x03 \x01(\tR\x11\x64ynamicJobSpecUri\"U\n\x1bParentTaskExecutionMetadata\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.TaskExecutionIdentifierR\x02id\"6\n\x1bParentNodeExecutionMetadata\x12\x17\n\x07node_id\x18\x01 \x01(\tR\x06nodeId\"b\n\x0b\x45ventReason\x12\x16\n\x06reason\x18\x01 \x01(\tR\x06reason\x12;\n\x0boccurred_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\"\x97\x08\n\x12TaskExecutionEvent\x12\x32\n\x07task_id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x06taskId\x12_\n\x18parent_node_execution_id\x18\x02 \x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x15parentNodeExecutionId\x12#\n\rretry_attempt\x18\x03 \x01(\rR\x0cretryAttempt\x12\x38\n\x05phase\x18\x04 \x01(\x0e\x32\".flyteidl.core.TaskExecution.PhaseR\x05phase\x12\x1f\n\x0bproducer_id\x18\x05 \x01(\tR\nproducerId\x12*\n\x04logs\x18\x06 \x03(\x0b\x32\x16.flyteidl.core.TaskLogR\x04logs\x12;\n\x0boccurred_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1d\n\tinput_uri\x18\x08 \x01(\tH\x00R\x08inputUri\x12:\n\ninput_data\x18\x13 
\x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\tinputData\x12\x1f\n\noutput_uri\x18\t \x01(\tH\x01R\toutputUri\x12\x35\n\x05\x65rror\x18\n \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x01R\x05\x65rror\x12<\n\x0boutput_data\x18\x11 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x01R\noutputData\x12\x38\n\x0b\x63ustom_info\x18\x0b \x01(\x0b\x32\x17.google.protobuf.StructR\ncustomInfo\x12#\n\rphase_version\x18\x0c \x01(\rR\x0cphaseVersion\x12\x1a\n\x06reason\x18\r \x01(\tB\x02\x18\x01R\x06reason\x12\x35\n\x07reasons\x18\x15 \x03(\x0b\x32\x1b.flyteidl.event.EventReasonR\x07reasons\x12\x1b\n\ttask_type\x18\x0e \x01(\tR\x08taskType\x12\x41\n\x08metadata\x18\x10 \x01(\x0b\x32%.flyteidl.event.TaskExecutionMetadataR\x08metadata\x12#\n\revent_version\x18\x12 \x01(\x05R\x0c\x65ventVersion\x12;\n\x0breported_at\x18\x14 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\nreportedAtB\r\n\x0binput_valueB\x0f\n\routput_result\"\x9e\x02\n\x14\x45xternalResourceInfo\x12\x1f\n\x0b\x65xternal_id\x18\x01 \x01(\tR\nexternalId\x12\x14\n\x05index\x18\x02 \x01(\rR\x05index\x12#\n\rretry_attempt\x18\x03 \x01(\rR\x0cretryAttempt\x12\x38\n\x05phase\x18\x04 \x01(\x0e\x32\".flyteidl.core.TaskExecution.PhaseR\x05phase\x12\x44\n\x0c\x63\x61\x63he_status\x18\x05 \x01(\x0e\x32!.flyteidl.core.CatalogCacheStatusR\x0b\x63\x61\x63heStatus\x12*\n\x04logs\x18\x06 \x03(\x0b\x32\x16.flyteidl.core.TaskLogR\x04logs\"[\n\x10ResourcePoolInfo\x12)\n\x10\x61llocation_token\x18\x01 \x01(\tR\x0f\x61llocationToken\x12\x1c\n\tnamespace\x18\x02 \x01(\tR\tnamespace\"\x9d\x03\n\x15TaskExecutionMetadata\x12%\n\x0egenerated_name\x18\x01 \x01(\tR\rgeneratedName\x12S\n\x12\x65xternal_resources\x18\x02 \x03(\x0b\x32$.flyteidl.event.ExternalResourceInfoR\x11\x65xternalResources\x12N\n\x12resource_pool_info\x18\x03 \x03(\x0b\x32 .flyteidl.event.ResourcePoolInfoR\x10resourcePoolInfo\x12+\n\x11plugin_identifier\x18\x04 \x01(\tR\x10pluginIdentifier\x12Z\n\x0einstance_class\x18\x10 \x01(\x0e\x32\x33.flyteidl.event.TaskExecutionMetadata.InstanceClassR\rinstanceClass\"/\n\rInstanceClass\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x11\n\rINTERRUPTIBLE\x10\x01\x42\xb6\x01\n\x12\x63om.flyteidl.eventB\nEventProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event\xa2\x02\x03\x46\x45X\xaa\x02\x0e\x46lyteidl.Event\xca\x02\x0e\x46lyteidl\\Event\xe2\x02\x1a\x46lyteidl\\Event\\GPBMetadata\xea\x02\x0f\x46lyteidl::Eventb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/event/event.proto\x12\x0e\x66lyteidl.event\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x1c\x66lyteidl/core/compiler.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1b\x66lyteidl/core/catalog.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xaa\x03\n\x16WorkflowExecutionEvent\x12M\n\x0c\x65xecution_id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\x12\x1f\n\x0bproducer_id\x18\x02 \x01(\tR\nproducerId\x12<\n\x05phase\x18\x03 \x01(\x0e\x32&.flyteidl.core.WorkflowExecution.PhaseR\x05phase\x12;\n\x0boccurred_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1f\n\noutput_uri\x18\x05 \x01(\tH\x00R\toutputUri\x12\x35\n\x05\x65rror\x18\x06 \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x00R\x05\x65rror\x12<\n\x0boutput_data\x18\x07 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\noutputDataB\x0f\n\routput_result\"\xb4\n\n\x12NodeExecutionEvent\x12\x36\n\x02id\x18\x01 
\x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x02id\x12\x1f\n\x0bproducer_id\x18\x02 \x01(\tR\nproducerId\x12\x38\n\x05phase\x18\x03 \x01(\x0e\x32\".flyteidl.core.NodeExecution.PhaseR\x05phase\x12;\n\x0boccurred_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1d\n\tinput_uri\x18\x05 \x01(\tH\x00R\x08inputUri\x12:\n\ninput_data\x18\x14 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\tinputData\x12\x1f\n\noutput_uri\x18\x06 \x01(\tH\x01R\toutputUri\x12\x35\n\x05\x65rror\x18\x07 \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x01R\x05\x65rror\x12<\n\x0boutput_data\x18\x0f \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x01R\noutputData\x12\\\n\x16workflow_node_metadata\x18\x08 \x01(\x0b\x32$.flyteidl.event.WorkflowNodeMetadataH\x02R\x14workflowNodeMetadata\x12P\n\x12task_node_metadata\x18\x0e \x01(\x0b\x32 .flyteidl.event.TaskNodeMetadataH\x02R\x10taskNodeMetadata\x12]\n\x14parent_task_metadata\x18\t \x01(\x0b\x32+.flyteidl.event.ParentTaskExecutionMetadataR\x12parentTaskMetadata\x12]\n\x14parent_node_metadata\x18\n \x01(\x0b\x32+.flyteidl.event.ParentNodeExecutionMetadataR\x12parentNodeMetadata\x12\x1f\n\x0bretry_group\x18\x0b \x01(\tR\nretryGroup\x12 \n\x0cspec_node_id\x18\x0c \x01(\tR\nspecNodeId\x12\x1b\n\tnode_name\x18\r \x01(\tR\x08nodeName\x12#\n\revent_version\x18\x10 \x01(\x05R\x0c\x65ventVersion\x12\x1b\n\tis_parent\x18\x11 \x01(\x08R\x08isParent\x12\x1d\n\nis_dynamic\x18\x12 \x01(\x08R\tisDynamic\x12\x19\n\x08\x64\x65\x63k_uri\x18\x13 \x01(\tR\x07\x64\x65\x63kUri\x12;\n\x0breported_at\x18\x15 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\nreportedAt\x12\x19\n\x08is_array\x18\x16 \x01(\x08R\x07isArray\x12>\n\rtarget_entity\x18\x17 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x0ctargetEntity\x12-\n\x13is_in_dynamic_chain\x18\x18 \x01(\x08R\x10isInDynamicChain\x12\x19\n\x08is_eager\x18\x19 \x01(\x08R\x07isEagerB\r\n\x0binput_valueB\x0f\n\routput_resultB\x11\n\x0ftarget_metadata\"e\n\x14WorkflowNodeMetadata\x12M\n\x0c\x65xecution_id\x18\x01 \x01(\x0b\x32*.flyteidl.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\"\xf1\x02\n\x10TaskNodeMetadata\x12\x44\n\x0c\x63\x61\x63he_status\x18\x01 \x01(\x0e\x32!.flyteidl.core.CatalogCacheStatusR\x0b\x63\x61\x63heStatus\x12?\n\x0b\x63\x61talog_key\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.CatalogMetadataR\ncatalogKey\x12W\n\x12reservation_status\x18\x03 \x01(\x0e\x32(.flyteidl.core.CatalogReservation.StatusR\x11reservationStatus\x12%\n\x0e\x63heckpoint_uri\x18\x04 \x01(\tR\rcheckpointUri\x12V\n\x10\x64ynamic_workflow\x18\x10 \x01(\x0b\x32+.flyteidl.event.DynamicWorkflowNodeMetadataR\x0f\x64ynamicWorkflow\"\xce\x01\n\x1b\x44ynamicWorkflowNodeMetadata\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12S\n\x11\x63ompiled_workflow\x18\x02 \x01(\x0b\x32&.flyteidl.core.CompiledWorkflowClosureR\x10\x63ompiledWorkflow\x12/\n\x14\x64ynamic_job_spec_uri\x18\x03 \x01(\tR\x11\x64ynamicJobSpecUri\"U\n\x1bParentTaskExecutionMetadata\x12\x36\n\x02id\x18\x01 \x01(\x0b\x32&.flyteidl.core.TaskExecutionIdentifierR\x02id\"6\n\x1bParentNodeExecutionMetadata\x12\x17\n\x07node_id\x18\x01 \x01(\tR\x06nodeId\"b\n\x0b\x45ventReason\x12\x16\n\x06reason\x18\x01 \x01(\tR\x06reason\x12;\n\x0boccurred_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\"\x97\x08\n\x12TaskExecutionEvent\x12\x32\n\x07task_id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x06taskId\x12_\n\x18parent_node_execution_id\x18\x02 
\x01(\x0b\x32&.flyteidl.core.NodeExecutionIdentifierR\x15parentNodeExecutionId\x12#\n\rretry_attempt\x18\x03 \x01(\rR\x0cretryAttempt\x12\x38\n\x05phase\x18\x04 \x01(\x0e\x32\".flyteidl.core.TaskExecution.PhaseR\x05phase\x12\x1f\n\x0bproducer_id\x18\x05 \x01(\tR\nproducerId\x12*\n\x04logs\x18\x06 \x03(\x0b\x32\x16.flyteidl.core.TaskLogR\x04logs\x12;\n\x0boccurred_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1d\n\tinput_uri\x18\x08 \x01(\tH\x00R\x08inputUri\x12:\n\ninput_data\x18\x13 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\tinputData\x12\x1f\n\noutput_uri\x18\t \x01(\tH\x01R\toutputUri\x12\x35\n\x05\x65rror\x18\n \x01(\x0b\x32\x1d.flyteidl.core.ExecutionErrorH\x01R\x05\x65rror\x12<\n\x0boutput_data\x18\x11 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x01R\noutputData\x12\x38\n\x0b\x63ustom_info\x18\x0b \x01(\x0b\x32\x17.google.protobuf.StructR\ncustomInfo\x12#\n\rphase_version\x18\x0c \x01(\rR\x0cphaseVersion\x12\x1a\n\x06reason\x18\r \x01(\tB\x02\x18\x01R\x06reason\x12\x35\n\x07reasons\x18\x15 \x03(\x0b\x32\x1b.flyteidl.event.EventReasonR\x07reasons\x12\x1b\n\ttask_type\x18\x0e \x01(\tR\x08taskType\x12\x41\n\x08metadata\x18\x10 \x01(\x0b\x32%.flyteidl.event.TaskExecutionMetadataR\x08metadata\x12#\n\revent_version\x18\x12 \x01(\x05R\x0c\x65ventVersion\x12;\n\x0breported_at\x18\x14 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\nreportedAtB\r\n\x0binput_valueB\x0f\n\routput_result\"\x9e\x02\n\x14\x45xternalResourceInfo\x12\x1f\n\x0b\x65xternal_id\x18\x01 \x01(\tR\nexternalId\x12\x14\n\x05index\x18\x02 \x01(\rR\x05index\x12#\n\rretry_attempt\x18\x03 \x01(\rR\x0cretryAttempt\x12\x38\n\x05phase\x18\x04 \x01(\x0e\x32\".flyteidl.core.TaskExecution.PhaseR\x05phase\x12\x44\n\x0c\x63\x61\x63he_status\x18\x05 \x01(\x0e\x32!.flyteidl.core.CatalogCacheStatusR\x0b\x63\x61\x63heStatus\x12*\n\x04logs\x18\x06 \x03(\x0b\x32\x16.flyteidl.core.TaskLogR\x04logs\"[\n\x10ResourcePoolInfo\x12)\n\x10\x61llocation_token\x18\x01 \x01(\tR\x0f\x61llocationToken\x12\x1c\n\tnamespace\x18\x02 \x01(\tR\tnamespace\"\x9d\x03\n\x15TaskExecutionMetadata\x12%\n\x0egenerated_name\x18\x01 \x01(\tR\rgeneratedName\x12S\n\x12\x65xternal_resources\x18\x02 \x03(\x0b\x32$.flyteidl.event.ExternalResourceInfoR\x11\x65xternalResources\x12N\n\x12resource_pool_info\x18\x03 \x03(\x0b\x32 .flyteidl.event.ResourcePoolInfoR\x10resourcePoolInfo\x12+\n\x11plugin_identifier\x18\x04 \x01(\tR\x10pluginIdentifier\x12Z\n\x0einstance_class\x18\x10 \x01(\x0e\x32\x33.flyteidl.event.TaskExecutionMetadata.InstanceClassR\rinstanceClass\"/\n\rInstanceClass\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x11\n\rINTERRUPTIBLE\x10\x01\x42\xb6\x01\n\x12\x63om.flyteidl.eventB\nEventProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event\xa2\x02\x03\x46\x45X\xaa\x02\x0e\x46lyteidl.Event\xca\x02\x0e\x46lyteidl\\Event\xe2\x02\x1a\x46lyteidl\\Event\\GPBMetadata\xea\x02\x0f\x46lyteidl::Eventb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -34,27 +34,27 @@ _globals['_WORKFLOWEXECUTIONEVENT']._serialized_start=262 _globals['_WORKFLOWEXECUTIONEVENT']._serialized_end=688 _globals['_NODEEXECUTIONEVENT']._serialized_start=691 - _globals['_NODEEXECUTIONEVENT']._serialized_end=1996 - _globals['_WORKFLOWNODEMETADATA']._serialized_start=1998 - _globals['_WORKFLOWNODEMETADATA']._serialized_end=2099 - _globals['_TASKNODEMETADATA']._serialized_start=2102 - _globals['_TASKNODEMETADATA']._serialized_end=2471 - _globals['_DYNAMICWORKFLOWNODEMETADATA']._serialized_start=2474 - 
_globals['_DYNAMICWORKFLOWNODEMETADATA']._serialized_end=2680 - _globals['_PARENTTASKEXECUTIONMETADATA']._serialized_start=2682 - _globals['_PARENTTASKEXECUTIONMETADATA']._serialized_end=2767 - _globals['_PARENTNODEEXECUTIONMETADATA']._serialized_start=2769 - _globals['_PARENTNODEEXECUTIONMETADATA']._serialized_end=2823 - _globals['_EVENTREASON']._serialized_start=2825 - _globals['_EVENTREASON']._serialized_end=2923 - _globals['_TASKEXECUTIONEVENT']._serialized_start=2926 - _globals['_TASKEXECUTIONEVENT']._serialized_end=3973 - _globals['_EXTERNALRESOURCEINFO']._serialized_start=3976 - _globals['_EXTERNALRESOURCEINFO']._serialized_end=4262 - _globals['_RESOURCEPOOLINFO']._serialized_start=4264 - _globals['_RESOURCEPOOLINFO']._serialized_end=4355 - _globals['_TASKEXECUTIONMETADATA']._serialized_start=4358 - _globals['_TASKEXECUTIONMETADATA']._serialized_end=4771 - _globals['_TASKEXECUTIONMETADATA_INSTANCECLASS']._serialized_start=4724 - _globals['_TASKEXECUTIONMETADATA_INSTANCECLASS']._serialized_end=4771 + _globals['_NODEEXECUTIONEVENT']._serialized_end=2023 + _globals['_WORKFLOWNODEMETADATA']._serialized_start=2025 + _globals['_WORKFLOWNODEMETADATA']._serialized_end=2126 + _globals['_TASKNODEMETADATA']._serialized_start=2129 + _globals['_TASKNODEMETADATA']._serialized_end=2498 + _globals['_DYNAMICWORKFLOWNODEMETADATA']._serialized_start=2501 + _globals['_DYNAMICWORKFLOWNODEMETADATA']._serialized_end=2707 + _globals['_PARENTTASKEXECUTIONMETADATA']._serialized_start=2709 + _globals['_PARENTTASKEXECUTIONMETADATA']._serialized_end=2794 + _globals['_PARENTNODEEXECUTIONMETADATA']._serialized_start=2796 + _globals['_PARENTNODEEXECUTIONMETADATA']._serialized_end=2850 + _globals['_EVENTREASON']._serialized_start=2852 + _globals['_EVENTREASON']._serialized_end=2950 + _globals['_TASKEXECUTIONEVENT']._serialized_start=2953 + _globals['_TASKEXECUTIONEVENT']._serialized_end=4000 + _globals['_EXTERNALRESOURCEINFO']._serialized_start=4003 + _globals['_EXTERNALRESOURCEINFO']._serialized_end=4289 + _globals['_RESOURCEPOOLINFO']._serialized_start=4291 + _globals['_RESOURCEPOOLINFO']._serialized_end=4382 + _globals['_TASKEXECUTIONMETADATA']._serialized_start=4385 + _globals['_TASKEXECUTIONMETADATA']._serialized_end=4798 + _globals['_TASKEXECUTIONMETADATA_INSTANCECLASS']._serialized_start=4751 + _globals['_TASKEXECUTIONMETADATA_INSTANCECLASS']._serialized_end=4798 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/event/event_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/event/event_pb2.pyi index c159089083..4defca30bf 100644 --- a/flyteidl/gen/pb_python/flyteidl/event/event_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/event/event_pb2.pyi @@ -32,7 +32,7 @@ class WorkflowExecutionEvent(_message.Message): def __init__(self, execution_id: _Optional[_Union[_identifier_pb2.WorkflowExecutionIdentifier, _Mapping]] = ..., producer_id: _Optional[str] = ..., phase: _Optional[_Union[_execution_pb2.WorkflowExecution.Phase, str]] = ..., occurred_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., output_uri: _Optional[str] = ..., error: _Optional[_Union[_execution_pb2.ExecutionError, _Mapping]] = ..., output_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ...) -> None: ... 
class NodeExecutionEvent(_message.Message): - __slots__ = ["id", "producer_id", "phase", "occurred_at", "input_uri", "input_data", "output_uri", "error", "output_data", "workflow_node_metadata", "task_node_metadata", "parent_task_metadata", "parent_node_metadata", "retry_group", "spec_node_id", "node_name", "event_version", "is_parent", "is_dynamic", "deck_uri", "reported_at", "is_array", "target_entity", "is_in_dynamic_chain"] + __slots__ = ["id", "producer_id", "phase", "occurred_at", "input_uri", "input_data", "output_uri", "error", "output_data", "workflow_node_metadata", "task_node_metadata", "parent_task_metadata", "parent_node_metadata", "retry_group", "spec_node_id", "node_name", "event_version", "is_parent", "is_dynamic", "deck_uri", "reported_at", "is_array", "target_entity", "is_in_dynamic_chain", "is_eager"] ID_FIELD_NUMBER: _ClassVar[int] PRODUCER_ID_FIELD_NUMBER: _ClassVar[int] PHASE_FIELD_NUMBER: _ClassVar[int] @@ -57,6 +57,7 @@ class NodeExecutionEvent(_message.Message): IS_ARRAY_FIELD_NUMBER: _ClassVar[int] TARGET_ENTITY_FIELD_NUMBER: _ClassVar[int] IS_IN_DYNAMIC_CHAIN_FIELD_NUMBER: _ClassVar[int] + IS_EAGER_FIELD_NUMBER: _ClassVar[int] id: _identifier_pb2.NodeExecutionIdentifier producer_id: str phase: _execution_pb2.NodeExecution.Phase @@ -81,7 +82,8 @@ class NodeExecutionEvent(_message.Message): is_array: bool target_entity: _identifier_pb2.Identifier is_in_dynamic_chain: bool - def __init__(self, id: _Optional[_Union[_identifier_pb2.NodeExecutionIdentifier, _Mapping]] = ..., producer_id: _Optional[str] = ..., phase: _Optional[_Union[_execution_pb2.NodeExecution.Phase, str]] = ..., occurred_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input_uri: _Optional[str] = ..., input_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., output_uri: _Optional[str] = ..., error: _Optional[_Union[_execution_pb2.ExecutionError, _Mapping]] = ..., output_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., workflow_node_metadata: _Optional[_Union[WorkflowNodeMetadata, _Mapping]] = ..., task_node_metadata: _Optional[_Union[TaskNodeMetadata, _Mapping]] = ..., parent_task_metadata: _Optional[_Union[ParentTaskExecutionMetadata, _Mapping]] = ..., parent_node_metadata: _Optional[_Union[ParentNodeExecutionMetadata, _Mapping]] = ..., retry_group: _Optional[str] = ..., spec_node_id: _Optional[str] = ..., node_name: _Optional[str] = ..., event_version: _Optional[int] = ..., is_parent: bool = ..., is_dynamic: bool = ..., deck_uri: _Optional[str] = ..., reported_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., is_array: bool = ..., target_entity: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., is_in_dynamic_chain: bool = ...) -> None: ... 
+ is_eager: bool + def __init__(self, id: _Optional[_Union[_identifier_pb2.NodeExecutionIdentifier, _Mapping]] = ..., producer_id: _Optional[str] = ..., phase: _Optional[_Union[_execution_pb2.NodeExecution.Phase, str]] = ..., occurred_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input_uri: _Optional[str] = ..., input_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., output_uri: _Optional[str] = ..., error: _Optional[_Union[_execution_pb2.ExecutionError, _Mapping]] = ..., output_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., workflow_node_metadata: _Optional[_Union[WorkflowNodeMetadata, _Mapping]] = ..., task_node_metadata: _Optional[_Union[TaskNodeMetadata, _Mapping]] = ..., parent_task_metadata: _Optional[_Union[ParentTaskExecutionMetadata, _Mapping]] = ..., parent_node_metadata: _Optional[_Union[ParentNodeExecutionMetadata, _Mapping]] = ..., retry_group: _Optional[str] = ..., spec_node_id: _Optional[str] = ..., node_name: _Optional[str] = ..., event_version: _Optional[int] = ..., is_parent: bool = ..., is_dynamic: bool = ..., deck_uri: _Optional[str] = ..., reported_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., is_array: bool = ..., target_entity: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., is_in_dynamic_chain: bool = ..., is_eager: bool = ...) -> None: ... class WorkflowNodeMetadata(_message.Message): __slots__ = ["execution_id"] diff --git a/flyteidl/gen/pb_python/flyteidl/plugins/ray_pb2.py b/flyteidl/gen/pb_python/flyteidl/plugins/ray_pb2.py index af809dcfcd..c625fd957b 100644 --- a/flyteidl/gen/pb_python/flyteidl/plugins/ray_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/plugins/ray_pb2.py @@ -11,9 +11,10 @@ _sym_db = _symbol_database.Default() +from flyteidl.core import tasks_pb2 as flyteidl_dot_core_dot_tasks__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/plugins/ray.proto\x12\x10\x66lyteidl.plugins\"\x92\x02\n\x06RayJob\x12=\n\x0bray_cluster\x18\x01 \x01(\x0b\x32\x1c.flyteidl.plugins.RayClusterR\nrayCluster\x12#\n\x0bruntime_env\x18\x02 \x01(\tB\x02\x18\x01R\nruntimeEnv\x12=\n\x1bshutdown_after_job_finishes\x18\x03 \x01(\x08R\x18shutdownAfterJobFinishes\x12;\n\x1attl_seconds_after_finished\x18\x04 \x01(\x05R\x17ttlSecondsAfterFinished\x12(\n\x10runtime_env_yaml\x18\x05 \x01(\tR\x0eruntimeEnvYaml\"\xd3\x01\n\nRayCluster\x12G\n\x0fhead_group_spec\x18\x01 \x01(\x0b\x32\x1f.flyteidl.plugins.HeadGroupSpecR\rheadGroupSpec\x12M\n\x11worker_group_spec\x18\x02 \x03(\x0b\x32!.flyteidl.plugins.WorkerGroupSpecR\x0fworkerGroupSpec\x12-\n\x12\x65nable_autoscaling\x18\x03 \x01(\x08R\x11\x65nableAutoscaling\"\xb1\x01\n\rHeadGroupSpec\x12]\n\x10ray_start_params\x18\x01 \x03(\x0b\x32\x33.flyteidl.plugins.HeadGroupSpec.RayStartParamsEntryR\x0erayStartParams\x1a\x41\n\x13RayStartParamsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\xb6\x02\n\x0fWorkerGroupSpec\x12\x1d\n\ngroup_name\x18\x01 \x01(\tR\tgroupName\x12\x1a\n\x08replicas\x18\x02 \x01(\x05R\x08replicas\x12!\n\x0cmin_replicas\x18\x03 \x01(\x05R\x0bminReplicas\x12!\n\x0cmax_replicas\x18\x04 \x01(\x05R\x0bmaxReplicas\x12_\n\x10ray_start_params\x18\x05 \x03(\x0b\x32\x35.flyteidl.plugins.WorkerGroupSpec.RayStartParamsEntryR\x0erayStartParams\x1a\x41\n\x13RayStartParamsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 
\x01(\tR\x05value:\x02\x38\x01\x42\xc0\x01\n\x14\x63om.flyteidl.pluginsB\x08RayProtoP\x01Z=github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PX\xaa\x02\x10\x46lyteidl.Plugins\xca\x02\x10\x46lyteidl\\Plugins\xe2\x02\x1c\x46lyteidl\\Plugins\\GPBMetadata\xea\x02\x11\x46lyteidl::Pluginsb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/plugins/ray.proto\x12\x10\x66lyteidl.plugins\x1a\x19\x66lyteidl/core/tasks.proto\"\x92\x02\n\x06RayJob\x12=\n\x0bray_cluster\x18\x01 \x01(\x0b\x32\x1c.flyteidl.plugins.RayClusterR\nrayCluster\x12#\n\x0bruntime_env\x18\x02 \x01(\tB\x02\x18\x01R\nruntimeEnv\x12=\n\x1bshutdown_after_job_finishes\x18\x03 \x01(\x08R\x18shutdownAfterJobFinishes\x12;\n\x1attl_seconds_after_finished\x18\x04 \x01(\x05R\x17ttlSecondsAfterFinished\x12(\n\x10runtime_env_yaml\x18\x05 \x01(\tR\x0eruntimeEnvYaml\"\xd3\x01\n\nRayCluster\x12G\n\x0fhead_group_spec\x18\x01 \x01(\x0b\x32\x1f.flyteidl.plugins.HeadGroupSpecR\rheadGroupSpec\x12M\n\x11worker_group_spec\x18\x02 \x03(\x0b\x32!.flyteidl.plugins.WorkerGroupSpecR\x0fworkerGroupSpec\x12-\n\x12\x65nable_autoscaling\x18\x03 \x01(\x08R\x11\x65nableAutoscaling\"\xe1\x01\n\rHeadGroupSpec\x12]\n\x10ray_start_params\x18\x01 \x03(\x0b\x32\x33.flyteidl.plugins.HeadGroupSpec.RayStartParamsEntryR\x0erayStartParams\x12.\n\x07k8s_pod\x18\x02 \x01(\x0b\x32\x15.flyteidl.core.K8sPodR\x06k8sPod\x1a\x41\n\x13RayStartParamsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\xe6\x02\n\x0fWorkerGroupSpec\x12\x1d\n\ngroup_name\x18\x01 \x01(\tR\tgroupName\x12\x1a\n\x08replicas\x18\x02 \x01(\x05R\x08replicas\x12!\n\x0cmin_replicas\x18\x03 \x01(\x05R\x0bminReplicas\x12!\n\x0cmax_replicas\x18\x04 \x01(\x05R\x0bmaxReplicas\x12_\n\x10ray_start_params\x18\x05 \x03(\x0b\x32\x35.flyteidl.plugins.WorkerGroupSpec.RayStartParamsEntryR\x0erayStartParams\x12.\n\x07k8s_pod\x18\x06 \x01(\x0b\x32\x15.flyteidl.core.K8sPodR\x06k8sPod\x1a\x41\n\x13RayStartParamsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\xc0\x01\n\x14\x63om.flyteidl.pluginsB\x08RayProtoP\x01Z=github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PX\xaa\x02\x10\x46lyteidl.Plugins\xca\x02\x10\x46lyteidl\\Plugins\xe2\x02\x1c\x46lyteidl\\Plugins\\GPBMetadata\xea\x02\x11\x46lyteidl::Pluginsb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -28,16 +29,16 @@ _HEADGROUPSPEC_RAYSTARTPARAMSENTRY._serialized_options = b'8\001' _WORKERGROUPSPEC_RAYSTARTPARAMSENTRY._options = None _WORKERGROUPSPEC_RAYSTARTPARAMSENTRY._serialized_options = b'8\001' - _globals['_RAYJOB']._serialized_start=49 - _globals['_RAYJOB']._serialized_end=323 - _globals['_RAYCLUSTER']._serialized_start=326 - _globals['_RAYCLUSTER']._serialized_end=537 - _globals['_HEADGROUPSPEC']._serialized_start=540 - _globals['_HEADGROUPSPEC']._serialized_end=717 - _globals['_HEADGROUPSPEC_RAYSTARTPARAMSENTRY']._serialized_start=652 - _globals['_HEADGROUPSPEC_RAYSTARTPARAMSENTRY']._serialized_end=717 - _globals['_WORKERGROUPSPEC']._serialized_start=720 - _globals['_WORKERGROUPSPEC']._serialized_end=1030 - _globals['_WORKERGROUPSPEC_RAYSTARTPARAMSENTRY']._serialized_start=652 - _globals['_WORKERGROUPSPEC_RAYSTARTPARAMSENTRY']._serialized_end=717 + _globals['_RAYJOB']._serialized_start=76 + _globals['_RAYJOB']._serialized_end=350 + _globals['_RAYCLUSTER']._serialized_start=353 + 
_globals['_RAYCLUSTER']._serialized_end=564 + _globals['_HEADGROUPSPEC']._serialized_start=567 + _globals['_HEADGROUPSPEC']._serialized_end=792 + _globals['_HEADGROUPSPEC_RAYSTARTPARAMSENTRY']._serialized_start=727 + _globals['_HEADGROUPSPEC_RAYSTARTPARAMSENTRY']._serialized_end=792 + _globals['_WORKERGROUPSPEC']._serialized_start=795 + _globals['_WORKERGROUPSPEC']._serialized_end=1153 + _globals['_WORKERGROUPSPEC_RAYSTARTPARAMSENTRY']._serialized_start=727 + _globals['_WORKERGROUPSPEC_RAYSTARTPARAMSENTRY']._serialized_end=792 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/plugins/ray_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/plugins/ray_pb2.pyi index 81d7382063..239e2fbc1a 100644 --- a/flyteidl/gen/pb_python/flyteidl/plugins/ray_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/plugins/ray_pb2.pyi @@ -1,3 +1,4 @@ +from flyteidl.core import tasks_pb2 as _tasks_pb2 from google.protobuf.internal import containers as _containers from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -30,7 +31,7 @@ class RayCluster(_message.Message): def __init__(self, head_group_spec: _Optional[_Union[HeadGroupSpec, _Mapping]] = ..., worker_group_spec: _Optional[_Iterable[_Union[WorkerGroupSpec, _Mapping]]] = ..., enable_autoscaling: bool = ...) -> None: ... class HeadGroupSpec(_message.Message): - __slots__ = ["ray_start_params"] + __slots__ = ["ray_start_params", "k8s_pod"] class RayStartParamsEntry(_message.Message): __slots__ = ["key", "value"] KEY_FIELD_NUMBER: _ClassVar[int] @@ -39,11 +40,13 @@ class HeadGroupSpec(_message.Message): value: str def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... RAY_START_PARAMS_FIELD_NUMBER: _ClassVar[int] + K8S_POD_FIELD_NUMBER: _ClassVar[int] ray_start_params: _containers.ScalarMap[str, str] - def __init__(self, ray_start_params: _Optional[_Mapping[str, str]] = ...) -> None: ... + k8s_pod: _tasks_pb2.K8sPod + def __init__(self, ray_start_params: _Optional[_Mapping[str, str]] = ..., k8s_pod: _Optional[_Union[_tasks_pb2.K8sPod, _Mapping]] = ...) -> None: ... class WorkerGroupSpec(_message.Message): - __slots__ = ["group_name", "replicas", "min_replicas", "max_replicas", "ray_start_params"] + __slots__ = ["group_name", "replicas", "min_replicas", "max_replicas", "ray_start_params", "k8s_pod"] class RayStartParamsEntry(_message.Message): __slots__ = ["key", "value"] KEY_FIELD_NUMBER: _ClassVar[int] @@ -56,9 +59,11 @@ class WorkerGroupSpec(_message.Message): MIN_REPLICAS_FIELD_NUMBER: _ClassVar[int] MAX_REPLICAS_FIELD_NUMBER: _ClassVar[int] RAY_START_PARAMS_FIELD_NUMBER: _ClassVar[int] + K8S_POD_FIELD_NUMBER: _ClassVar[int] group_name: str replicas: int min_replicas: int max_replicas: int ray_start_params: _containers.ScalarMap[str, str] - def __init__(self, group_name: _Optional[str] = ..., replicas: _Optional[int] = ..., min_replicas: _Optional[int] = ..., max_replicas: _Optional[int] = ..., ray_start_params: _Optional[_Mapping[str, str]] = ...) -> None: ... + k8s_pod: _tasks_pb2.K8sPod + def __init__(self, group_name: _Optional[str] = ..., replicas: _Optional[int] = ..., min_replicas: _Optional[int] = ..., max_replicas: _Optional[int] = ..., ray_start_params: _Optional[_Mapping[str, str]] = ..., k8s_pod: _Optional[_Union[_tasks_pb2.K8sPod, _Mapping]] = ...) -> None: ... 
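Illustrative usage (not part of the patch): a minimal Python sketch of the regenerated bindings above, assuming a flyteidl build that already includes these changes; the producer id, node selector, and group name below are hypothetical placeholders.

    from google.protobuf import json_format, struct_pb2

    from flyteidl.core import tasks_pb2
    from flyteidl.event import event_pb2
    from flyteidl.plugins import ray_pb2

    # NodeExecutionEvent gained an is_eager flag (field 25) alongside the existing
    # is_in_dynamic_chain / is_array booleans.
    event = event_pb2.NodeExecutionEvent(producer_id="propeller", is_eager=True)

    # HeadGroupSpec and WorkerGroupSpec now accept an optional core.K8sPod whose
    # pod_spec is a google.protobuf.Struct carrying a raw pod spec fragment.
    head_pod = tasks_pb2.K8sPod(
        pod_spec=json_format.ParseDict({"nodeSelector": {"ray-head": "true"}}, struct_pb2.Struct())
    )

    cluster = ray_pb2.RayCluster(
        head_group_spec=ray_pb2.HeadGroupSpec(
            ray_start_params={"object-store-memory": "100000000"},
            k8s_pod=head_pod,
        ),
        worker_group_spec=[
            ray_pb2.WorkerGroupSpec(group_name="workers", replicas=2, k8s_pod=tasks_pb2.K8sPod()),
        ],
    )
    print(event.is_eager, len(cluster.worker_group_spec))

The same new fields appear in the Rust bindings and the .proto sources further down in this diff.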
diff --git a/flyteidl/gen/pb_rust/flyteidl.admin.rs b/flyteidl/gen/pb_rust/flyteidl.admin.rs index 30f39ab45d..dd099daa67 100644 --- a/flyteidl/gen/pb_rust/flyteidl.admin.rs +++ b/flyteidl/gen/pb_rust/flyteidl.admin.rs @@ -177,6 +177,9 @@ pub struct Resource { /// Custom data specific to the agent. #[prost(message, optional, tag="6")] pub custom_info: ::core::option::Option<::prost_types::Struct>, + /// The error raised during execution + #[prost(message, optional, tag="7")] + pub agent_error: ::core::option::Option<AgentError>, } /// A message used to delete a task. #[allow(clippy::derive_partial_eq_without_eq)] @@ -348,6 +351,51 @@ pub mod get_task_logs_response { Body(super::GetTaskLogsResponseBody), } } +/// Error message to propagate detailed errors from agent executions to the execution +/// engine. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AgentError { + /// A simplified code for errors, so that we can provide a glossary of all possible errors. + #[prost(string, tag="1")] + pub code: ::prost::alloc::string::String, + /// An abstract error kind for this error. Defaults to Non_Recoverable if not specified. + #[prost(enumeration="agent_error::Kind", tag="3")] + pub kind: i32, + /// Defines the origin of the error (system, user, unknown). + #[prost(enumeration="super::core::execution_error::ErrorKind", tag="4")] + pub origin: i32, +} +/// Nested message and enum types in `AgentError`. +pub mod agent_error { + /// Defines a generic error type that dictates the behavior of the retry strategy. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Kind { + NonRecoverable = 0, + Recoverable = 1, + } + impl Kind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Kind::NonRecoverable => "NON_RECOVERABLE", + Kind::Recoverable => "RECOVERABLE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option<Self> { + match value { + "NON_RECOVERABLE" => Some(Self::NonRecoverable), + "RECOVERABLE" => Some(Self::Recoverable), + _ => None, + } + } + } +} /// The state of the execution is used to control its visibility in the UI/CLI. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -2274,6 +2322,9 @@ pub struct NodeExecutionMetaData { /// array nodes from other nodes which can have is_parent_node as true. #[prost(bool, tag="5")] pub is_array: bool, + /// Whether this node is an eager node. + #[prost(bool, tag="6")] + pub is_eager: bool, } /// Request structure to retrieve a list of node execution entities. /// See :ref:`ref_flyteidl.admin.NodeExecution` for more details diff --git a/flyteidl/gen/pb_rust/flyteidl.core.rs b/flyteidl/gen/pb_rust/flyteidl.core.rs index a97a209a47..0cfc6a19ae 100644 --- a/flyteidl/gen/pb_rust/flyteidl.core.rs +++ b/flyteidl/gen/pb_rust/flyteidl.core.rs @@ -1385,6 +1385,10 @@ pub struct TaskMetadata { /// cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache.
#[prost(string, repeated, tag="13")] pub cache_ignore_input_vars: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// is_eager indicates whether the task is eager or not. + /// This would be used by CreateTask endpoint. + #[prost(bool, tag="14")] + pub is_eager: bool, // For interruptible we will populate it at the node level but require it be part of TaskMetadata // for a user to set the value. // We are using oneof instead of bool because otherwise we would be unable to distinguish between value being @@ -2431,6 +2435,9 @@ pub struct ArrayNode { /// execution_mode determines the execution path for ArrayNode. #[prost(enumeration="array_node::ExecutionMode", tag="5")] pub execution_mode: i32, + /// Indicates whether the sub node's original interface was altered + #[prost(message, optional, tag="6")] + pub is_original_sub_node_interface: ::core::option::Option<bool>, #[prost(oneof="array_node::ParallelismOption", tags="2")] pub parallelism_option: ::core::option::Option<array_node::ParallelismOption>, #[prost(oneof="array_node::SuccessCriteria", tags="3, 4")] diff --git a/flyteidl/gen/pb_rust/flyteidl.event.rs b/flyteidl/gen/pb_rust/flyteidl.event.rs index 80a8a11442..589d6c8a83 100644 --- a/flyteidl/gen/pb_rust/flyteidl.event.rs +++ b/flyteidl/gen/pb_rust/flyteidl.event.rs @@ -100,6 +100,9 @@ pub struct NodeExecutionEvent { /// as well as task IDs in any corresponding Task Executions, should not be used to looked up the task in Admin's db. #[prost(bool, tag="24")] pub is_in_dynamic_chain: bool, + /// Whether this node launched an eager task. + #[prost(bool, tag="25")] + pub is_eager: bool, #[prost(oneof="node_execution_event::InputValue", tags="5, 20")] pub input_value: ::core::option::Option<node_execution_event::InputValue>, #[prost(oneof="node_execution_event::OutputResult", tags="6, 7, 15")] diff --git a/flyteidl/gen/pb_rust/flyteidl.plugins.rs b/flyteidl/gen/pb_rust/flyteidl.plugins.rs index 0252c9d882..65f187c3e0 100644 --- a/flyteidl/gen/pb_rust/flyteidl.plugins.rs +++ b/flyteidl/gen/pb_rust/flyteidl.plugins.rs @@ -255,6 +255,9 @@ pub struct HeadGroupSpec { /// Refer to <https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start> #[prost(map="string, string", tag="1")] pub ray_start_params: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// Pod Spec for the ray head pod + #[prost(message, optional, tag="2")] + pub k8s_pod: ::core::option::Option<super::core::K8sPod>, } /// WorkerGroupSpec are the specs for the worker pods #[allow(clippy::derive_partial_eq_without_eq)] @@ -276,6 +279,9 @@ pub struct WorkerGroupSpec { /// Refer to <https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start> #[prost(map="string, string", tag="5")] pub ray_start_params: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// Pod Spec for ray worker pods + #[prost(message, optional, tag="6")] + pub k8s_pod: ::core::option::Option<super::core::K8sPod>, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] diff --git a/flyteidl/go.mod b/flyteidl/go.mod index 4c913dcb4d..6653ce5a87 100644 --- a/flyteidl/go.mod +++ b/flyteidl/go.mod @@ -5,6 +5,7 @@ go 1.22 require ( github.com/flyteorg/flyte/flytestdlib v0.0.0-00010101000000-000000000000 github.com/go-test/deep v1.0.7 + github.com/golang-jwt/jwt/v5 v5.2.1 github.com/golang/protobuf v1.5.3 github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 @@ -13,6 +14,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c github.com/pkg/errors v0.9.1 + github.com/shamaton/msgpack/v2 v2.2.2 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify
v1.9.0 golang.org/x/net v0.27.0 @@ -54,7 +56,6 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect diff --git a/flyteidl/go.sum b/flyteidl/go.sum index f440e247e9..b398d5d02f 100644 --- a/flyteidl/go.sum +++ b/flyteidl/go.sum @@ -215,6 +215,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shamaton/msgpack/v2 v2.2.2 h1:GOIg0c9LV04VwzOOqZSrmsv/JzjNOOMxnS/HvOHGdgs= +github.com/shamaton/msgpack/v2 v2.2.2/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= diff --git a/flyteidl/protos/flyteidl/admin/agent.proto b/flyteidl/protos/flyteidl/admin/agent.proto index 931c27785f..96fbb087b8 100644 --- a/flyteidl/protos/flyteidl/admin/agent.proto +++ b/flyteidl/protos/flyteidl/admin/agent.proto @@ -138,6 +138,8 @@ message Resource { core.TaskExecution.Phase phase = 5; // Custom data specific to the agent. google.protobuf.Struct custom_info = 6; + // The error raised during execution + AgentError agent_error = 7; } // A message used to delete a task. @@ -256,3 +258,22 @@ message GetTaskLogsResponse { GetTaskLogsResponseBody body = 2; } } + +// Error message to propagate detailed errors from agent executions to the execution +// engine. +message AgentError { + // A simplified code for errors, so that we can provide a glossary of all possible errors. + string code = 1; + + // Defines a generic error type that dictates the behavior of the retry strategy. + enum Kind { + NON_RECOVERABLE = 0; + RECOVERABLE = 1; + } + + // An abstract error kind for this error. Defaults to Non_Recoverable if not specified. + Kind kind = 3; + + // Defines the origin of the error (system, user, unknown). + core.ExecutionError.ErrorKind origin = 4; +} diff --git a/flyteidl/protos/flyteidl/admin/common.proto b/flyteidl/protos/flyteidl/admin/common.proto index 6c04b0531a..58eda8e491 100644 --- a/flyteidl/protos/flyteidl/admin/common.proto +++ b/flyteidl/protos/flyteidl/admin/common.proto @@ -29,6 +29,7 @@ message NamedEntityIdentifier { // The status of the named entity is used to control its visibility in the UI. enum NamedEntityState { + reserved 3, 4; // By default, all named entities are considered active and under development. NAMED_ENTITY_ACTIVE = 0; diff --git a/flyteidl/protos/flyteidl/admin/execution.proto b/flyteidl/protos/flyteidl/admin/execution.proto index 6197576bd9..d3802057a6 100644 --- a/flyteidl/protos/flyteidl/admin/execution.proto +++ b/flyteidl/protos/flyteidl/admin/execution.proto @@ -206,6 +206,8 @@ message SystemMetadata { message ExecutionMetadata { // The method by which this execution was launched. 
enum ExecutionMode { + reserved 7; + // The default execution mode, MANUAL implies that an execution was launched by an individual. MANUAL = 0; diff --git a/flyteidl/protos/flyteidl/admin/node_execution.proto b/flyteidl/protos/flyteidl/admin/node_execution.proto index 411201ea45..7ae7aa7ee1 100644 --- a/flyteidl/protos/flyteidl/admin/node_execution.proto +++ b/flyteidl/protos/flyteidl/admin/node_execution.proto @@ -118,6 +118,9 @@ message NodeExecutionMetaData { // Boolean flag indicating if the node is an array node. This is intended to uniquely identify // array nodes from other nodes which can have is_parent_node as true. bool is_array = 5; + + // Whether this node is an eager node. + bool is_eager = 6; } // Request structure to retrieve a list of node execution entities. diff --git a/flyteidl/protos/flyteidl/core/tasks.proto b/flyteidl/protos/flyteidl/core/tasks.proto index 20a1fa0cbf..332f9fdad4 100644 --- a/flyteidl/protos/flyteidl/core/tasks.proto +++ b/flyteidl/protos/flyteidl/core/tasks.proto @@ -134,6 +134,9 @@ message TaskMetadata { // cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache. repeated string cache_ignore_input_vars = 13; + // is_eager indicates whether the task is eager or not. + // This would be used by CreateTask endpoint. + bool is_eager = 14; } // A Task structure that uniquely identifies a task in the system diff --git a/flyteidl/protos/flyteidl/core/workflow.proto b/flyteidl/protos/flyteidl/core/workflow.proto index 3df4b2422f..cf75850ece 100644 --- a/flyteidl/protos/flyteidl/core/workflow.proto +++ b/flyteidl/protos/flyteidl/core/workflow.proto @@ -147,6 +147,9 @@ message ArrayNode { // execution_mode determines the execution path for ArrayNode. ExecutionMode execution_mode = 5; + + // Indicates whether the sub node's original interface was altered + google.protobuf.BoolValue is_original_sub_node_interface = 6; } // Defines extra information about the Node. diff --git a/flyteidl/protos/flyteidl/event/event.proto b/flyteidl/protos/flyteidl/event/event.proto index 640b4804e9..2840f5a57e 100644 --- a/flyteidl/protos/flyteidl/event/event.proto +++ b/flyteidl/protos/flyteidl/event/event.proto @@ -127,6 +127,9 @@ message NodeExecutionEvent { // if the relevant execution entity is was registered, or dynamic. This field indicates that the target_entity ID, // as well as task IDs in any corresponding Task Executions, should not be used to looked up the task in Admin's db. bool is_in_dynamic_chain = 24; + + // Whether this node launched an eager task. + bool is_eager = 25; } // For Workflow Nodes we need to send information about the workflow that's launched diff --git a/flyteidl/protos/flyteidl/plugins/ray.proto b/flyteidl/protos/flyteidl/plugins/ray.proto index c20c6360e7..749444ee04 100644 --- a/flyteidl/protos/flyteidl/plugins/ray.proto +++ b/flyteidl/protos/flyteidl/plugins/ray.proto @@ -4,6 +4,8 @@ package flyteidl.plugins; option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; +import "flyteidl/core/tasks.proto"; + // RayJobSpec defines the desired state of RayJob message RayJob { // RayClusterSpec is the cluster template to run the job @@ -35,6 +37,8 @@ message HeadGroupSpec { // Optional. RayStartParams are the params of the start command: address, object-store-memory. 
// Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start map<string, string> ray_start_params = 1; + // Pod Spec for the ray head pod + core.K8sPod k8s_pod = 2; } // WorkerGroupSpec are the specs for the worker pods @@ -50,4 +54,6 @@ message WorkerGroupSpec { // Optional. RayStartParams are the params of the start command: address, object-store-memory. // Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start map<string, string> ray_start_params = 5; + // Pod Spec for ray worker pods + core.K8sPod k8s_pod = 6; } diff --git a/flyteplugins/.golangci.yml b/flyteplugins/.golangci.yml index 6d13f4a3b6..9b6ab1e86d 100644 --- a/flyteplugins/.golangci.yml +++ b/flyteplugins/.golangci.yml @@ -1,35 +1,25 @@ -# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. -# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: -# -# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst - run: skip-dirs: - pkg/client - linters: disable-all: true enable: - - deadcode - errcheck - - gas + - gosec - gci - goconst - goimports - - golint - gosimple - govet - ineffassign - misspell - nakedret - staticcheck - - structcheck - typecheck - unconvert - unparam - unused - - varcheck - + - protogetter linters-settings: gci: custom-order: true diff --git a/flyteplugins/go.mod b/flyteplugins/go.mod index 7616900390..11962b93c9 100644 --- a/flyteplugins/go.mod +++ b/flyteplugins/go.mod @@ -11,6 +11,7 @@ require ( github.com/coocood/freecache v1.1.1 github.com/dask/dask-kubernetes/v2023 v2023.0.0-20230626103304-abd02cd17b26 github.com/flyteorg/flyte/flyteidl v0.0.0-00010101000000-000000000000 + github.com/flyteorg/flyte/flytepropeller v0.0.0-00010101000000-000000000000 github.com/flyteorg/flyte/flytestdlib v0.0.0-00010101000000-000000000000 github.com/go-test/deep v1.0.7 github.com/golang/protobuf v1.5.3 @@ -53,6 +54,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/Masterminds/semver v1.5.0 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.0 // indirect @@ -117,7 +119,7 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect diff --git a/flyteplugins/go.sum b/flyteplugins/go.sum index d11f6b60a3..18242a8638 100644 --- a/flyteplugins/go.sum +++ b/flyteplugins/go.sum @@ -62,6 +62,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/spark-on-k8s-operator v0.0.0-20200723154620-6f35a1152625 h1:cQyO5JQ2iuHnEcF3v24kdDMsgh04RjyFPDtuvD6PCE0= github.com/GoogleCloudPlatform/spark-on-k8s-operator v0.0.0-20200723154620-6f35a1152625/go.mod
h1:6PnrZv6zUDkrNMw0mIoGRmGBR7i9LulhKPmxFq4rUiM= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/aws/aws-sdk-go v1.47.11 h1:Dol+MA+hQblbnXUI3Vk9qvoekU6O1uDEuAItezjiWNQ= @@ -310,6 +312,7 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= @@ -389,8 +392,8 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= diff --git a/flyteplugins/go/tasks/logs/logging_utils.go b/flyteplugins/go/tasks/logs/logging_utils.go index 4bfff0dd17..c237fc15c8 100644 --- a/flyteplugins/go/tasks/logs/logging_utils.go +++ b/flyteplugins/go/tasks/logs/logging_utils.go @@ -24,12 +24,14 @@ func GetLogsForContainerInPod(ctx context.Context, logPlugin tasklog.Plugin, tas return nil, nil } + // #nosec G115 if uint32(len(pod.Spec.Containers)) <= index { logger.Errorf(ctx, "container IndexOutOfBound, requested [%d], but total containers [%d] in pod phase [%v]", index, len(pod.Spec.Containers), pod.Status.Phase) return nil, nil } containerID := v1.ContainerStatus{}.ContainerID + // #nosec G115 if uint32(len(pod.Status.ContainerStatuses)) <= index { msg := fmt.Sprintf("containerStatus IndexOutOfBound, requested [%d], but total containerStatuses [%d] in pod phase [%v]", index, len(pod.Status.ContainerStatuses), pod.Status.Phase) if pod.Status.Phase == v1.PodPending { @@ -129,6 +131,8 @@ func InitializeLogPlugins(cfg *LogConfig) (tasklog.Plugin, error) { DisplayName: dynamicLogLink.DisplayName, DynamicTemplateURIs: dynamicLogLink.TemplateURIs, MessageFormat: core.TaskLog_JSON, + 
ShowWhilePending: dynamicLogLink.ShowWhilePending, + HideOnceFinished: dynamicLogLink.HideOnceFinished, }) } diff --git a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go index a36edf20ea..0447b488cd 100644 --- a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go +++ b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go @@ -31,7 +31,7 @@ func (h failFastHandler) Handle(ctx context.Context, tCtx core.TaskExecutionCont } return core.DoTransition(core.PhaseInfoFailure("AlwaysFail", fmt.Sprintf("Task [%s] type [%+v] not supported by platform for this project/domain/workflow", - taskTemplate.Type, tCtx.TaskExecutionMetadata().GetTaskExecutionID()), &core.TaskInfo{ + taskTemplate.GetType(), tCtx.TaskExecutionMetadata().GetTaskExecutionID()), &core.TaskInfo{ OccurredAt: &occuredAt, })), nil } diff --git a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go index c302db8c32..bf856290d1 100644 --- a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go @@ -48,8 +48,8 @@ func TestHandleAlwaysFails(t *testing.T) { transition, err := testHandler.Handle(context.TODO(), taskCtx) assert.NoError(t, err) assert.Equal(t, core.PhasePermanentFailure, transition.Info().Phase()) - assert.Equal(t, "AlwaysFail", transition.Info().Err().Code) - assert.Contains(t, transition.Info().Err().Message, "Task [unsupportedtype]") + assert.Equal(t, "AlwaysFail", transition.Info().Err().GetCode()) + assert.Contains(t, transition.Info().Err().GetMessage(), "Task [unsupportedtype]") } func TestAbort(t *testing.T) { diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go index c056989905..224cfd612e 100644 --- a/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go @@ -41,7 +41,7 @@ func consistentHash(str string) (string, error) { func hashInputs(ctx context.Context, key Key) (string, error) { inputs := &core.LiteralMap{} - if key.TypedInterface.Inputs != nil { + if key.TypedInterface.GetInputs() != nil { retInputs, err := key.InputReader.Get(ctx) if err != nil { return "", err @@ -88,7 +88,7 @@ func (c AsyncClientImpl) Download(ctx context.Context, requests ...DownloadReque } if readerWorkItem.IsCached() { - cachedResults.Set(uint(idx)) + cachedResults.Set(uint(idx)) // #nosec G115 cachedCount++ } case workqueue.WorkStatusFailed: diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go b/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go index 15a4347351..7bdb435761 100644 --- a/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go @@ -49,11 +49,11 @@ func TestStatus(t *testing.T) { status := NewStatus(cacheStatus, &catalogMetadata) assert.Equal(t, status.GetCacheStatus(), cacheStatus) - assert.Equal(t, status.GetMetadata().DatasetId.Project, catalogMetadata.DatasetId.Project) - assert.Equal(t, status.GetMetadata().DatasetId.Domain, catalogMetadata.DatasetId.Domain) - assert.Equal(t, status.GetMetadata().DatasetId.Name, catalogMetadata.DatasetId.Name) - assert.Equal(t, status.GetMetadata().ArtifactTag.ArtifactId, catalogMetadata.ArtifactTag.ArtifactId) - assert.Equal(t, status.GetMetadata().ArtifactTag.Name, 
catalogMetadata.ArtifactTag.Name) + assert.Equal(t, status.GetMetadata().GetDatasetId().GetProject(), catalogMetadata.GetDatasetId().GetProject()) + assert.Equal(t, status.GetMetadata().GetDatasetId().GetDomain(), catalogMetadata.GetDatasetId().GetDomain()) + assert.Equal(t, status.GetMetadata().GetDatasetId().GetName(), catalogMetadata.GetDatasetId().GetName()) + assert.Equal(t, status.GetMetadata().GetArtifactTag().GetArtifactId(), catalogMetadata.GetArtifactTag().GetArtifactId()) + assert.Equal(t, status.GetMetadata().GetArtifactTag().GetName(), catalogMetadata.GetArtifactTag().GetName()) } func TestEntry(t *testing.T) { @@ -75,11 +75,11 @@ func TestEntry(t *testing.T) { t.Run(tt.name, func(t *testing.T) { status := tt.entry.GetStatus() assert.Equal(t, status.GetCacheStatus(), cacheStatus) - assert.Equal(t, status.GetMetadata().DatasetId.Project, catalogMetadata.DatasetId.Project) - assert.Equal(t, status.GetMetadata().DatasetId.Domain, catalogMetadata.DatasetId.Domain) - assert.Equal(t, status.GetMetadata().DatasetId.Name, catalogMetadata.DatasetId.Name) - assert.Equal(t, status.GetMetadata().ArtifactTag.ArtifactId, catalogMetadata.ArtifactTag.ArtifactId) - assert.Equal(t, status.GetMetadata().ArtifactTag.Name, catalogMetadata.ArtifactTag.Name) + assert.Equal(t, status.GetMetadata().GetDatasetId().GetProject(), catalogMetadata.GetDatasetId().GetProject()) + assert.Equal(t, status.GetMetadata().GetDatasetId().GetDomain(), catalogMetadata.GetDatasetId().GetDomain()) + assert.Equal(t, status.GetMetadata().GetDatasetId().GetName(), catalogMetadata.GetDatasetId().GetName()) + assert.Equal(t, status.GetMetadata().GetArtifactTag().GetArtifactId(), catalogMetadata.GetArtifactTag().GetArtifactId()) + assert.Equal(t, status.GetMetadata().GetArtifactTag().GetName(), catalogMetadata.GetArtifactTag().GetName()) }) } } diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go b/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go index 4cc2fbd5cd..7dda4afa97 100644 --- a/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go @@ -27,7 +27,7 @@ func hashify(literal *core.Literal) *core.Literal { // 1. A collection of literals or // 2. A map of literals if literal.GetCollection() != nil { - literals := literal.GetCollection().Literals + literals := literal.GetCollection().GetLiterals() literalsHash := make([]*core.Literal, 0) for _, lit := range literals { literalsHash = append(literalsHash, hashify(lit)) @@ -42,7 +42,7 @@ func hashify(literal *core.Literal) *core.Literal { } if literal.GetMap() != nil { literalsMap := make(map[string]*core.Literal) - for key, lit := range literal.GetMap().Literals { + for key, lit := range literal.GetMap().GetLiterals() { literalsMap[key] = hashify(lit) } return &core.Literal{ @@ -58,14 +58,14 @@ func hashify(literal *core.Literal) *core.Literal { } func HashLiteralMap(ctx context.Context, literalMap *core.LiteralMap, cacheIgnoreInputVars []string) (string, error) { - if literalMap == nil || len(literalMap.Literals) == 0 { + if literalMap == nil || len(literalMap.GetLiterals()) == 0 { literalMap = &emptyLiteralMap } // Hashify, i.e. generate a copy of the literal map where each literal value is removed // in case the corresponding hash is set. 
- hashifiedLiteralMap := make(map[string]*core.Literal, len(literalMap.Literals)) - for name, literal := range literalMap.Literals { + hashifiedLiteralMap := make(map[string]*core.Literal, len(literalMap.GetLiterals())) + for name, literal := range literalMap.GetLiterals() { if !slices.Contains(cacheIgnoreInputVars, name) { hashifiedLiteralMap[name] = hashify(literal) } diff --git a/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go b/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go index 5969d44661..9a020bd188 100644 --- a/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go +++ b/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go @@ -27,7 +27,7 @@ type TaskExecutionID interface { GetGeneratedNameWith(minLength, maxLength int) (string, error) // GetID returns the underlying idl task identifier. - GetID() core.TaskExecutionIdentifier + GetID() core.TaskExecutionIdentifier // TODO (whynopointer) // GetUniqueNodeID returns the fully-qualified Node ID that is unique within a // given workflow execution. @@ -48,7 +48,7 @@ type TaskExecutionMetadata interface { GetMaxAttempts() uint32 GetAnnotations() map[string]string GetK8sServiceAccount() string - GetSecurityContext() core.SecurityContext + GetSecurityContext() core.SecurityContext // TODO (whynopointer) IsInterruptible() bool GetPlatformResources() *v1.ResourceRequirements GetInterruptibleFailureThreshold() int32 diff --git a/flyteplugins/go/tasks/pluginmachinery/core/template/template.go b/flyteplugins/go/tasks/pluginmachinery/core/template/template.go index 5aea60c4b9..9192cf851c 100644 --- a/flyteplugins/go/tasks/pluginmachinery/core/template/template.go +++ b/flyteplugins/go/tasks/pluginmachinery/core/template/template.go @@ -162,7 +162,7 @@ func render(ctx context.Context, inputTemplate string, params Parameters, perRet } func transformVarNameToStringVal(ctx context.Context, varName string, inputs *idlCore.LiteralMap) (string, error) { - inputVal, exists := inputs.Literals[varName] + inputVal, exists := inputs.GetLiterals()[varName] if !exists { return "", fmt.Errorf("requested input is not found [%s]", varName) } @@ -175,7 +175,7 @@ func transformVarNameToStringVal(ctx context.Context, varName string, inputs *id } func serializePrimitive(p *idlCore.Primitive) (string, error) { - switch o := p.Value.(type) { + switch o := p.GetValue().(type) { case *idlCore.Primitive_Integer: return fmt.Sprintf("%v", o.Integer), nil case *idlCore.Primitive_Boolean: @@ -189,22 +189,22 @@ func serializePrimitive(p *idlCore.Primitive) (string, error) { case *idlCore.Primitive_StringValue: return o.StringValue, nil default: - return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(p.Value)) + return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(p.GetValue())) } } func serializeLiteralScalar(l *idlCore.Scalar) (string, error) { - switch o := l.Value.(type) { + switch o := l.GetValue().(type) { case *idlCore.Scalar_Primitive: return serializePrimitive(o.Primitive) case *idlCore.Scalar_Blob: - return o.Blob.Uri, nil + return o.Blob.GetUri(), nil case *idlCore.Scalar_Schema: - return o.Schema.Uri, nil + return o.Schema.GetUri(), nil case *idlCore.Scalar_Binary: - binaryBytes := o.Binary.Value + binaryBytes := o.Binary.GetValue() var currVal any - if o.Binary.Tag == coreutils.MESSAGEPACK { + if o.Binary.GetTag() == coreutils.MESSAGEPACK { err := msgpack.Unmarshal(binaryBytes, &currVal) if err != nil { return "", fmt.Errorf("failed to unmarshal messagepack bytes with 
literal:[%v], err:[%v]", l, err) @@ -212,18 +212,18 @@ func serializeLiteralScalar(l *idlCore.Scalar) (string, error) { // TODO: Try to support Primitive_Datetime, Primitive_Duration, Flyte File, and Flyte Directory. return fmt.Sprintf("%v", currVal), nil } - return "", fmt.Errorf("unsupported binary tag [%v]", o.Binary.Tag) + return "", fmt.Errorf("unsupported binary tag [%v]", o.Binary.GetTag()) default: - return "", fmt.Errorf("received an unexpected scalar type [%v]", reflect.TypeOf(l.Value)) + return "", fmt.Errorf("received an unexpected scalar type [%v]", reflect.TypeOf(l.GetValue())) } } func serializeLiteral(ctx context.Context, l *idlCore.Literal) (string, error) { - switch o := l.Value.(type) { + switch o := l.GetValue().(type) { case *idlCore.Literal_Collection: - res := make([]string, 0, len(o.Collection.Literals)) - for _, sub := range o.Collection.Literals { + res := make([]string, 0, len(o.Collection.GetLiterals())) + for _, sub := range o.Collection.GetLiterals() { s, err := serializeLiteral(ctx, sub) if err != nil { return "", err @@ -237,6 +237,6 @@ func serializeLiteral(ctx context.Context, l *idlCore.Literal) (string, error) { return serializeLiteralScalar(o.Scalar) default: logger.Debugf(ctx, "received unexpected primitive type") - return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(l.Value)) + return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(l.GetValue())) } } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go index eb19015586..adb2d655bb 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go @@ -64,8 +64,9 @@ var ( DefaultPodTemplateResync: config2.Duration{ Duration: 30 * time.Second, }, - UpdateBaseBackoffDuration: 10, - UpdateBackoffRetries: 5, + UpdateBaseBackoffDuration: 10, + UpdateBackoffRetries: 5, + AddTolerationsForExtendedResources: []string{}, } // K8sPluginConfigSection provides a singular top level config section for all plugins. @@ -214,6 +215,9 @@ type K8sPluginConfig struct { // Number of retries for exponential backoff when updating a resource. UpdateBackoffRetries int `json:"update-backoff-retries" pflag:",Number of retries for exponential backoff when updating a resource."` + + // Extended resources that should be added to the tolerations automatically. + AddTolerationsForExtendedResources []string `json:"add-tolerations-for-extended-resources" pflag:",Name of the extended resources for which tolerations should be added."` } // FlyteCoPilotConfig specifies configuration for the Flyte CoPilot system. 
FlyteCoPilot, allows running flytekit-less containers diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go index 4652d0bfd4..caa485ff39 100755 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go @@ -69,5 +69,6 @@ func (cfg K8sPluginConfig) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "send-object-events"), defaultK8sConfig.SendObjectEvents, "If true, will send k8s object events in TaskExecutionEvent updates.") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "update-base-backoff-duration"), defaultK8sConfig.UpdateBaseBackoffDuration, "Initial delay in exponential backoff when updating a resource in milliseconds.") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "update-backoff-retries"), defaultK8sConfig.UpdateBackoffRetries, "Number of retries for exponential backoff when updating a resource.") + cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "add-tolerations-for-extended-resources"), defaultK8sConfig.AddTolerationsForExtendedResources, "Name of the extended resources for which tolerations should be added.") return cmdFlags } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go index cc46ffa466..cb50078620 100755 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go @@ -365,4 +365,18 @@ func TestK8sPluginConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_add-tolerations-for-extended-resources", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := join_K8sPluginConfig(defaultK8sConfig.AddTolerationsForExtendedResources, ",") + + cmdFlags.Set("add-tolerations-for-extended-resources", testValue) + if vStringSlice, err := cmdFlags.GetStringSlice("add-tolerations-for-extended-resources"); err == nil { + testDecodeRaw_K8sPluginConfig(t, join_K8sPluginConfig(vStringSlice, ","), &actual.AddTolerationsForExtendedResources) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go index 32d2e0180e..501798c798 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go @@ -213,7 +213,7 @@ func BuildRawContainer(ctx context.Context, tCtx pluginscore.TaskExecutionContex containerName = rand.String(4) } - res, err := ToK8sResourceRequirements(taskContainer.Resources) + res, err := ToK8sResourceRequirements(taskContainer.GetResources()) if err != nil { return nil, err } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper_test.go index 3b7aa88aeb..4e609c72b2 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper_test.go @@ -525,7 +525,7 @@ func TestAddFlyteCustomizationsToContainer(t *testing.T) { assert.EqualValues(t, container.Command, []string{"s3://input/path"}) assert.Len(t, container.Resources.Limits, 3) assert.Len(t, 
container.Resources.Requests, 3) - assert.Len(t, container.Env, 13) + assert.Len(t, container.Env, 12) } func TestAddFlyteCustomizationsToContainer_Resources(t *testing.T) { diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go index eaee5bce6c..a5e5e70099 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go @@ -20,7 +20,7 @@ import ( ) const ( - flyteSidecarContainerName = "sidecar" + flyteSidecarContainerName = "uploader" flyteInitContainerName = "downloader" ) @@ -162,7 +162,7 @@ func CalculateStorageSize(requirements *v1.ResourceRequirements) *resource.Quant } func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c *v1.Container, iFace *core.TypedInterface, pilot *core.DataLoadingConfig) error { - if pilot == nil || !pilot.Enabled { + if pilot == nil || !pilot.GetEnabled() { return nil } logger.Infof(ctx, "Enabling CoPilot on main container [%s]", c.Name) @@ -175,7 +175,7 @@ func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c c.SecurityContext.Capabilities.Add = append(c.SecurityContext.Capabilities.Add, pTraceCapability) if iFace != nil { - if iFace.Inputs != nil && len(iFace.Inputs.Variables) > 0 { + if iFace.GetInputs() != nil && len(iFace.GetInputs().GetVariables()) > 0 { inPath := cfg.DefaultInputDataPath if pilot.GetInputPath() != "" { inPath = pilot.GetInputPath() @@ -187,7 +187,7 @@ func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c }) } - if iFace.Outputs != nil && len(iFace.Outputs.Variables) > 0 { + if iFace.GetOutputs() != nil && len(iFace.GetOutputs().GetVariables()) > 0 { outPath := cfg.DefaultOutputPath if pilot.GetOutputPath() != "" { outPath = pilot.GetOutputPath() @@ -202,16 +202,17 @@ func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c } func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilotPod *v1.PodSpec, iFace *core.TypedInterface, taskExecMetadata core2.TaskExecutionMetadata, inputPaths io.InputFilePaths, outputPaths io.OutputFilePaths, pilot *core.DataLoadingConfig) (string, error) { - if pilot == nil || !pilot.Enabled { + if pilot == nil || !pilot.GetEnabled() { return "", nil } - logger.Infof(ctx, "CoPilot Enabled for task [%s]", taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name) + //nolint:protogetter + logger.Infof(ctx, "CoPilot Enabled for task [%s]", taskExecMetadata.GetTaskExecutionID().GetID().TaskId.GetName()) shareProcessNamespaceEnabled := true coPilotPod.ShareProcessNamespace = &shareProcessNamespaceEnabled primaryInitContainerName := "" if iFace != nil { - if iFace.Inputs != nil && len(iFace.Inputs.Variables) > 0 { + if iFace.GetInputs() != nil && len(iFace.GetInputs().GetVariables()) > 0 { inPath := cfg.DefaultInputDataPath if pilot.GetInputPath() != "" { inPath = pilot.GetInputPath() @@ -219,18 +220,19 @@ func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilot // TODO we should calculate input volume size based on the size of the inputs which is known ahead of time. 
We should store that as part of the metadata size := CalculateStorageSize(taskExecMetadata.GetOverrides().GetResources()) - logger.Infof(ctx, "Adding Input path [%s] of Size [%d] for Task [%s]", inPath, size, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name) + //nolint:protogetter + logger.Infof(ctx, "Adding Input path [%s] of Size [%d] for Task [%s]", inPath, size, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.GetName()) inputsVolumeMount := v1.VolumeMount{ Name: cfg.InputVolumeName, MountPath: inPath, } - format := pilot.Format + format := pilot.GetFormat() // Lets add the InputsVolume coPilotPod.Volumes = append(coPilotPod.Volumes, DataVolume(cfg.InputVolumeName, size)) // Lets add the Inputs init container - args, err := DownloadCommandArgs(inputPaths.GetInputPath(), outputPaths.GetOutputPrefixPath(), inPath, format, iFace.Inputs) + args, err := DownloadCommandArgs(inputPaths.GetInputPath(), outputPaths.GetOutputPrefixPath(), inPath, format, iFace.GetInputs()) if err != nil { return primaryInitContainerName, err } @@ -242,14 +244,15 @@ func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilot primaryInitContainerName = downloader.Name } - if iFace.Outputs != nil && len(iFace.Outputs.Variables) > 0 { + if iFace.GetOutputs() != nil && len(iFace.GetOutputs().GetVariables()) > 0 { outPath := cfg.DefaultOutputPath if pilot.GetOutputPath() != "" { outPath = pilot.GetOutputPath() } size := CalculateStorageSize(taskExecMetadata.GetOverrides().GetResources()) - logger.Infof(ctx, "Adding Output path [%s] of size [%d] for Task [%s]", size, outPath, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name) + //nolint:protogetter + logger.Infof(ctx, "Adding Output path [%s] of size [%d] for Task [%s]", size, outPath, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.GetName()) outputsVolumeMount := v1.VolumeMount{ Name: cfg.OutputVolumeName, diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go index aba18c85ac..f989e57567 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "reflect" + "strings" "testing" "time" @@ -107,6 +108,12 @@ func TestFlyteCoPilotContainer(t *testing.T) { assert.Error(t, err) cfg.Memory = old }) + + t.Run("sidecar-container-name-change", func(t *testing.T) { + c, err := FlyteCoPilotContainer(flyteSidecarContainerName, cfg, []string{"hello"}) + assert.NoError(t, err) + assert.Equal(t, "uploader", strings.Split(c.Name, "-")[1]) + }) } func TestDownloadCommandArgs(t *testing.T) { @@ -132,11 +139,11 @@ func TestDownloadCommandArgs(t *testing.T) { if assert.NoError(t, err) { vm := &core.VariableMap{} assert.NoError(t, proto.Unmarshal(serIFaceBytes, vm)) - assert.Len(t, vm.Variables, 2) - for k, v := range iFace.Variables { - v2, ok := vm.Variables[k] + assert.Len(t, vm.GetVariables(), 2) + for k, v := range iFace.GetVariables() { + v2, ok := vm.GetVariables()[k] assert.True(t, ok) - assert.Equal(t, v.Type.GetSimple(), v2.Type.GetSimple(), "for %s, types do not match", k) + assert.Equal(t, v.GetType().GetSimple(), v2.GetType().GetSimple(), "for %s, types do not match", k) } } } @@ -167,11 +174,11 @@ func TestSidecarCommandArgs(t *testing.T) { if assert.NoError(t, err) { if2 := &core.TypedInterface{} assert.NoError(t, proto.Unmarshal(serIFaceBytes, if2)) - assert.Len(t, if2.Outputs.Variables, 2) - for k, v := 
range iFace.Outputs.Variables { - v2, ok := if2.Outputs.Variables[k] + assert.Len(t, if2.GetOutputs().GetVariables(), 2) + for k, v := range iFace.GetOutputs().GetVariables() { + v2, ok := if2.GetOutputs().GetVariables()[k] assert.True(t, ok) - assert.Equal(t, v.Type.GetSimple(), v2.Type.GetSimple(), "for %s, types do not match", k) + assert.Equal(t, v.GetType().GetSimple(), v2.GetType().GetSimple(), "for %s, types do not match", k) } } } @@ -196,20 +203,20 @@ func assertContainerHasVolumeMounts(t *testing.T, cfg config.FlyteCoPilotConfig, for _, v := range c.VolumeMounts { vmap[v.Name] = v } - if iFace.Inputs != nil { + if iFace.GetInputs() != nil { path := cfg.DefaultInputDataPath - if pilot.InputPath != "" { - path = pilot.InputPath + if pilot.GetInputPath() != "" { + path = pilot.GetInputPath() } v, found := vmap[cfg.InputVolumeName] assert.Equal(t, path, v.MountPath, "Input Path does not match") assert.True(t, found, "Input volume mount expected but not found!") } - if iFace.Outputs != nil { + if iFace.GetOutputs() != nil { path := cfg.DefaultOutputPath - if pilot.OutputPath != "" { - path = pilot.OutputPath + if pilot.GetOutputPath() != "" { + path = pilot.GetOutputPath() } v, found := vmap[cfg.OutputVolumeName] assert.Equal(t, path, v.MountPath, "Output Path does not match") @@ -260,10 +267,10 @@ func assertPodHasCoPilot(t *testing.T, cfg config.FlyteCoPilotConfig, pilot *cor for _, v := range c.VolumeMounts { vmap[v.Name] = v } - if iFace.Inputs != nil { + if iFace.GetInputs() != nil { path := cfg.DefaultInputDataPath if pilot != nil { - path = pilot.InputPath + path = pilot.GetInputPath() } v, found := vmap[cfg.InputVolumeName] if c.Name == cfg.NamePrefix+flyteInitContainerName { @@ -274,10 +281,10 @@ func assertPodHasCoPilot(t *testing.T, cfg config.FlyteCoPilotConfig, pilot *cor } } - if iFace.Outputs != nil { + if iFace.GetOutputs() != nil { path := cfg.DefaultOutputPath if pilot != nil { - path = pilot.OutputPath + path = pilot.GetOutputPath() } v, found := vmap[cfg.OutputVolumeName] if c.Name == cfg.NamePrefix+flyteInitContainerName { diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go index 3cd000dd40..df74771961 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go @@ -12,6 +12,7 @@ import ( pluginsCore "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + propellerCfg "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" "github.com/flyteorg/flyte/flytestdlib/contextutils" ) @@ -43,105 +44,115 @@ func GetContextEnvVars(ownerCtx context.Context) []v1.EnvVar { func GetExecutionEnvVars(id pluginsCore.TaskExecutionID, consoleURL string) []v1.EnvVar { - if id == nil || id.GetID().NodeExecutionId == nil || id.GetID().NodeExecutionId.ExecutionId == nil { + //nolint:protogetter + if id == nil || id.GetID().NodeExecutionId == nil || id.GetID().NodeExecutionId.GetExecutionId() == nil { return []v1.EnvVar{} } // Execution level env variables. 
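Most of the churn from here on is mechanical: direct proto field access (x.Field) is replaced by the generated getters (x.GetField()), which are nil-safe and return the zero value when the receiver is nil; the few places that still touch fields directly, such as the NodeExecutionId nil checks just above, carry a //nolint:protogetter suppression instead. A small illustration of why the getters are preferred, using the flyteidl core types already imported in these files:

package main

import (
	"fmt"

	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"
)

func main() {
	var id *core.TaskExecutionIdentifier // deliberately nil

	// id.TaskId.Name would panic with a nil pointer dereference here.
	// The generated getters short-circuit on nil receivers instead:
	fmt.Printf("task name: %q\n", id.GetTaskId().GetName()) // prints task name: ""
}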
- nodeExecutionID := id.GetID().NodeExecutionId.ExecutionId - attemptNumber := strconv.Itoa(int(id.GetID().RetryAttempt)) + nodeExecutionID := id.GetID().NodeExecutionId.GetExecutionId() //nolint:protogetter + attemptNumber := strconv.Itoa(int(id.GetID().RetryAttempt)) //nolint:protogetter envVars := []v1.EnvVar{ { Name: "FLYTE_INTERNAL_EXECUTION_ID", - Value: nodeExecutionID.Name, + Value: nodeExecutionID.GetName(), }, { Name: "FLYTE_INTERNAL_EXECUTION_PROJECT", - Value: nodeExecutionID.Project, + Value: nodeExecutionID.GetProject(), }, { Name: "FLYTE_INTERNAL_EXECUTION_DOMAIN", - Value: nodeExecutionID.Domain, - }, - { - // FLYTE_INTERNAL_POD_NAME - Name: "_F_PN", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, + Value: nodeExecutionID.GetDomain(), }, { Name: "FLYTE_ATTEMPT_NUMBER", Value: attemptNumber, }, - // TODO: Fill in these - // { - // Name: "FLYTE_INTERNAL_EXECUTION_WORKFLOW", - // Value: "", - // }, - // { - // Name: "FLYTE_INTERNAL_EXECUTION_LAUNCHPLAN", - // Value: "", - // }, } if len(consoleURL) > 0 { consoleURL = strings.TrimRight(consoleURL, "/") envVars = append(envVars, v1.EnvVar{ Name: flyteExecutionURL, - Value: fmt.Sprintf("%s/projects/%s/domains/%s/executions/%s/nodeId/%s/nodes", consoleURL, nodeExecutionID.Project, nodeExecutionID.Domain, nodeExecutionID.Name, id.GetUniqueNodeID()), + Value: fmt.Sprintf("%s/projects/%s/domains/%s/executions/%s/nodeId/%s/nodes", consoleURL, nodeExecutionID.GetProject(), nodeExecutionID.GetDomain(), nodeExecutionID.GetName(), id.GetUniqueNodeID()), }) } // Task definition Level env variables. - if id.GetID().TaskId != nil { - taskID := id.GetID().TaskId + if id.GetID().TaskId != nil { //nolint:protogetter + taskID := id.GetID().TaskId //nolint:protogetter envVars = append(envVars, v1.EnvVar{ Name: "FLYTE_INTERNAL_TASK_PROJECT", - Value: taskID.Project, + Value: taskID.GetProject(), }, v1.EnvVar{ Name: "FLYTE_INTERNAL_TASK_DOMAIN", - Value: taskID.Domain, + Value: taskID.GetDomain(), }, v1.EnvVar{ Name: "FLYTE_INTERNAL_TASK_NAME", - Value: taskID.Name, + Value: taskID.GetName(), }, v1.EnvVar{ Name: "FLYTE_INTERNAL_TASK_VERSION", - Value: taskID.Version, + Value: taskID.GetVersion(), }, // Historic Task Definition Level env variables. // Remove these once SDK is migrated to use the new ones. 
v1.EnvVar{ Name: "FLYTE_INTERNAL_PROJECT", - Value: taskID.Project, + Value: taskID.GetProject(), }, v1.EnvVar{ Name: "FLYTE_INTERNAL_DOMAIN", - Value: taskID.Domain, + Value: taskID.GetDomain(), }, v1.EnvVar{ Name: "FLYTE_INTERNAL_NAME", - Value: taskID.Name, + Value: taskID.GetName(), }, v1.EnvVar{ Name: "FLYTE_INTERNAL_VERSION", - Value: taskID.Version, + Value: taskID.GetVersion(), }) } return envVars } +func GetLiteralOffloadingEnvVars() []v1.EnvVar { + propellerConfig := propellerCfg.GetConfig() + if !propellerConfig.LiteralOffloadingConfig.Enabled { + return []v1.EnvVar{} + } + + envVars := []v1.EnvVar{} + if propellerConfig.LiteralOffloadingConfig.MinSizeInMBForOffloading > 0 { + envVars = append(envVars, + v1.EnvVar{ + Name: "_F_L_MIN_SIZE_MB", + Value: strconv.FormatInt(propellerConfig.LiteralOffloadingConfig.MinSizeInMBForOffloading, 10), + }, + ) + } + if propellerConfig.LiteralOffloadingConfig.MaxSizeInMBForOffloading > 0 { + envVars = append(envVars, + v1.EnvVar{ + Name: "_F_L_MAX_SIZE_MB", + Value: strconv.FormatInt(propellerConfig.LiteralOffloadingConfig.MaxSizeInMBForOffloading, 10), + }, + ) + } + return envVars +} + func DecorateEnvVars(ctx context.Context, envVars []v1.EnvVar, envFroms []v1.EnvFromSource, taskEnvironmentVariables map[string]string, id pluginsCore.TaskExecutionID, consoleURL string) ([]v1.EnvVar, []v1.EnvFromSource) { envVars = append(envVars, GetContextEnvVars(ctx)...) envVars = append(envVars, GetExecutionEnvVars(id, consoleURL)...) + envVars = append(envVars, GetLiteralOffloadingEnvVars()...) for k, v := range taskEnvironmentVariables { envVars = append(envVars, v1.EnvVar{Name: k, Value: v}) diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go index fd4828fbbd..0ed5fc0337 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go @@ -14,6 +14,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" pluginsCore "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + propellerCfg "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" "github.com/flyteorg/flyte/flytestdlib/contextutils" ) @@ -27,13 +28,13 @@ func TestGetExecutionEnvVars(t *testing.T) { }{ { "no-console-url", - 13, + 12, "", nil, }, { "with-console-url", - 14, + 13, "scheme://host/path", &v12.EnvVar{ Name: "FLYTE_EXECUTION_URL", @@ -42,7 +43,7 @@ func TestGetExecutionEnvVars(t *testing.T) { }, { "with-console-url-ending-in-single-slash", - 14, + 13, "scheme://host/path/", &v12.EnvVar{ Name: "FLYTE_EXECUTION_URL", @@ -51,7 +52,7 @@ func TestGetExecutionEnvVars(t *testing.T) { }, { "with-console-url-ending-in-multiple-slashes", - 14, + 13, "scheme://host/path////", &v12.EnvVar{ Name: "FLYTE_EXECUTION_URL", @@ -63,7 +64,7 @@ func TestGetExecutionEnvVars(t *testing.T) { envVars := GetExecutionEnvVars(mock, tt.consoleURL) assert.Len(t, envVars, tt.expectedEnvVars) if tt.expectedEnvVar != nil { - assert.True(t, proto.Equal(&envVars[5], tt.expectedEnvVar)) + assert.True(t, proto.Equal(&envVars[4], tt.expectedEnvVar)) } } } @@ -304,6 +305,8 @@ func TestDecorateEnvVars(t *testing.T) { expected := append(defaultEnv, GetContextEnvVars(ctx)...) expected = append(expected, GetExecutionEnvVars(mockTaskExecutionIdentifier{}, "")...) 
+ expectedOffloaded := append(expected, v12.EnvVar{Name: "_F_L_MIN_SIZE_MB", Value: "1"}) + expectedOffloaded = append(expectedOffloaded, v12.EnvVar{Name: "_F_L_MAX_SIZE_MB", Value: "42"}) aggregated := append(expected, v12.EnvVar{Name: "k", Value: "v"}) type args struct { @@ -315,17 +318,77 @@ func TestDecorateEnvVars(t *testing.T) { args args additionEnvVar map[string]string additionEnvVarFromEnv map[string]string + offloadingEnabled bool + offloadingEnvVar map[string]string executionEnvVar map[string]string consoleURL string want []v12.EnvVar }{ - {"no-additional", args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, emptyEnvVar, emptyEnvVar, emptyEnvVar, "", expected}, - {"with-additional", args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, additionalEnv, emptyEnvVar, emptyEnvVar, "", aggregated}, - {"from-env", args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, emptyEnvVar, envVarsFromEnv, emptyEnvVar, "", aggregated}, - {"from-execution-metadata", args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, emptyEnvVar, emptyEnvVar, additionalEnv, "", aggregated}, + { + "no-additional", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + emptyEnvVar, + emptyEnvVar, + false, + emptyEnvVar, + emptyEnvVar, + "", + expected, + }, + { + "no-additional-offloading-enabled", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + emptyEnvVar, + emptyEnvVar, + true, + emptyEnvVar, + emptyEnvVar, + "", + expectedOffloaded, + }, + { + "with-additional", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + additionalEnv, + emptyEnvVar, + false, + emptyEnvVar, + emptyEnvVar, + "", + aggregated, + }, + { + "from-env", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + emptyEnvVar, + envVarsFromEnv, + false, + emptyEnvVar, + emptyEnvVar, + "", + aggregated, + }, + { + "from-execution-metadata", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + emptyEnvVar, + emptyEnvVar, + false, + emptyEnvVar, + additionalEnv, + "", + aggregated, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + cfg := propellerCfg.GetConfig() + cfg.LiteralOffloadingConfig = propellerCfg.LiteralOffloadingConfig{ + Enabled: tt.offloadingEnabled, + MinSizeInMBForOffloading: 1, + MaxSizeInMBForOffloading: 42, + } + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ DefaultEnvVars: tt.additionEnvVar, DefaultEnvVarsFromEnv: tt.additionEnvVarFromEnv, diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go index 229f963968..6beca78f54 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go @@ -11,6 +11,7 @@ import ( "github.com/imdario/mergo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" pluginserrors "github.com/flyteorg/flyte/flyteplugins/go/tasks/errors" @@ -287,15 +288,15 @@ func BuildRawPod(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v } case *core.TaskTemplate_K8SPod: // handles pod tasks that marshal the pod spec to the k8s_pod task target. 
- if target.K8SPod.PodSpec == nil { + if target.K8SPod.GetPodSpec() == nil { return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "Pod tasks with task type version > 1 should specify their target as a K8sPod with a defined pod spec") } - err := utils.UnmarshalStructToObj(target.K8SPod.PodSpec, &podSpec) + err := utils.UnmarshalStructToObj(target.K8SPod.GetPodSpec(), &podSpec) if err != nil { return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification, - "Unable to unmarshal task k8s pod [%v], Err: [%v]", target.K8SPod.PodSpec, err.Error()) + "Unable to unmarshal task k8s pod [%v], Err: [%v]", target.K8SPod.GetPodSpec(), err.Error()) } // get primary container name @@ -306,9 +307,9 @@ func BuildRawPod(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v } // update annotations and labels - if taskTemplate.GetK8SPod().Metadata != nil { - mergeMapInto(target.K8SPod.Metadata.Annotations, objectMeta.Annotations) - mergeMapInto(target.K8SPod.Metadata.Labels, objectMeta.Labels) + if taskTemplate.GetK8SPod().GetMetadata() != nil { + mergeMapInto(target.K8SPod.GetMetadata().GetAnnotations(), objectMeta.Annotations) + mergeMapInto(target.K8SPod.GetMetadata().GetLabels(), objectMeta.Labels) } default: return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification, @@ -393,7 +394,7 @@ func ApplyFlytePodConfiguration(ctx context.Context, tCtx pluginsCore.TaskExecut if dataLoadingConfig != nil { if err := AddCoPilotToContainer(ctx, config.GetK8sPluginConfig().CoPilot, - primaryContainer, taskTemplate.Interface, dataLoadingConfig); err != nil { + primaryContainer, taskTemplate.GetInterface(), dataLoadingConfig); err != nil { return nil, nil, err } @@ -445,6 +446,54 @@ func ApplyContainerImageOverride(podSpec *v1.PodSpec, containerImage string, pri } } +func addTolerationInPodSpec(podSpec *v1.PodSpec, toleration *v1.Toleration) *v1.PodSpec { + podTolerations := podSpec.Tolerations + + var newTolerations []v1.Toleration + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + return podSpec + } + newTolerations = append(newTolerations, podTolerations[i]) + } + newTolerations = append(newTolerations, *toleration) + podSpec.Tolerations = newTolerations + return podSpec +} + +func AddTolerationsForExtendedResources(podSpec *v1.PodSpec) *v1.PodSpec { + if podSpec == nil { + podSpec = &v1.PodSpec{} + } + + resources := sets.NewString() + for _, container := range podSpec.Containers { + for _, extendedResource := range config.GetK8sPluginConfig().AddTolerationsForExtendedResources { + if _, ok := container.Resources.Requests[v1.ResourceName(extendedResource)]; ok { + resources.Insert(extendedResource) + } + } + } + + for _, container := range podSpec.InitContainers { + for _, extendedResource := range config.GetK8sPluginConfig().AddTolerationsForExtendedResources { + if _, ok := container.Resources.Requests[v1.ResourceName(extendedResource)]; ok { + resources.Insert(extendedResource) + } + } + } + + for _, resource := range resources.List() { + addTolerationInPodSpec(podSpec, &v1.Toleration{ + Key: resource, + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }) + } + + return podSpec +} + // ToK8sPodSpec builds a PodSpec and ObjectMeta based on the definition passed by the TaskExecutionContext. This // involves parsing the raw PodSpec definition and applying all Flyte configuration options. 
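AddTolerationsForExtendedResources above scans both containers and init containers, collects every requested resource that is named in the plugin config, and appends an Exists/NoSchedule toleration per resource, leaving tolerations the pod already declares untouched (MatchToleration). A minimal sketch of the behavior, assuming the flytek8s and config packages from this diff; the GPU resource name is illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s"
	"github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s/config"
)

func main() {
	_ = config.SetK8sPluginConfig(&config.K8sPluginConfig{
		AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"},
	})

	podSpec := &v1.PodSpec{
		Containers: []v1.Container{{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					"nvidia.com/gpu": resource.MustParse("1"),
				},
			},
		}},
	}

	podSpec = flytek8s.AddTolerationsForExtendedResources(podSpec)
	for _, tol := range podSpec.Tolerations {
		fmt.Printf("%s %s %s\n", tol.Key, tol.Operator, tol.Effect)
	}
	// Expected: nvidia.com/gpu Exists NoSchedule
}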
func ToK8sPodSpec(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v1.PodSpec, *metav1.ObjectMeta, string, error) { @@ -460,6 +509,8 @@ func ToK8sPodSpec(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (* return nil, nil, "", err } + podSpec = AddTolerationsForExtendedResources(podSpec) + return podSpec, objectMeta, primaryContainerName, nil } @@ -483,11 +534,11 @@ func getBasePodTemplate(ctx context.Context, tCtx pluginsCore.TaskExecutionConte } var podTemplate *v1.PodTemplate - if taskTemplate.Metadata != nil && len(taskTemplate.Metadata.PodTemplateName) > 0 { + if taskTemplate.GetMetadata() != nil && len(taskTemplate.GetMetadata().GetPodTemplateName()) > 0 { // retrieve PodTemplate by name from PodTemplateStore - podTemplate = podTemplateStore.LoadOrDefault(tCtx.TaskExecutionMetadata().GetNamespace(), taskTemplate.Metadata.PodTemplateName) + podTemplate = podTemplateStore.LoadOrDefault(tCtx.TaskExecutionMetadata().GetNamespace(), taskTemplate.GetMetadata().GetPodTemplateName()) if podTemplate == nil { - return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "PodTemplate '%s' does not exist", taskTemplate.Metadata.PodTemplateName) + return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "PodTemplate '%s' does not exist", taskTemplate.GetMetadata().GetPodTemplateName()) } } else { // check for default PodTemplate diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go index 9797b5e05b..0a70cdd895 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go @@ -3,6 +3,7 @@ package flytek8s import ( "context" "encoding/json" + "fmt" "io/ioutil" "path/filepath" "reflect" @@ -1529,7 +1530,7 @@ func TestDemystifyPendingTimeout(t *testing.T) { taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) assert.NoError(t, err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskStatus.Phase()) - assert.Equal(t, "PodPendingTimeout", taskStatus.Err().Code) + assert.Equal(t, "PodPendingTimeout", taskStatus.Err().GetCode()) assert.True(t, taskStatus.CleanupOnFailure()) }) } @@ -1549,7 +1550,7 @@ func TestDemystifySuccess(t *testing.T) { }, pluginsCore.TaskInfo{}) assert.Nil(t, err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "OOMKilled", phaseInfo.Err().Code) + assert.Equal(t, "OOMKilled", phaseInfo.Err().GetCode()) }) t.Run("InitContainer OOMKilled", func(t *testing.T) { @@ -1566,7 +1567,7 @@ func TestDemystifySuccess(t *testing.T) { }, pluginsCore.TaskInfo{}) assert.Nil(t, err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "OOMKilled", phaseInfo.Err().Code) + assert.Equal(t, "OOMKilled", phaseInfo.Err().GetCode()) }) t.Run("success", func(t *testing.T) { @@ -1581,16 +1582,16 @@ func TestDemystifyFailure(t *testing.T) { phaseInfo, err := DemystifyFailure(v1.PodStatus{}, pluginsCore.TaskInfo{}) assert.Nil(t, err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "UnknownError", phaseInfo.Err().Code) - assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + assert.Equal(t, "UnknownError", phaseInfo.Err().GetCode()) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().GetKind()) }) t.Run("known-error", func(t *testing.T) { phaseInfo, err := DemystifyFailure(v1.PodStatus{Reason: "hello"}, pluginsCore.TaskInfo{}) assert.Nil(t, 
err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "hello", phaseInfo.Err().Code) - assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + assert.Equal(t, "hello", phaseInfo.Err().GetCode()) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().GetKind()) }) t.Run("OOMKilled", func(t *testing.T) { @@ -1608,8 +1609,8 @@ func TestDemystifyFailure(t *testing.T) { }, pluginsCore.TaskInfo{}) assert.Nil(t, err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "OOMKilled", phaseInfo.Err().Code) - assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + assert.Equal(t, "OOMKilled", phaseInfo.Err().GetCode()) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().GetKind()) }) t.Run("SIGKILL", func(t *testing.T) { @@ -1627,8 +1628,8 @@ func TestDemystifyFailure(t *testing.T) { }, pluginsCore.TaskInfo{}) assert.Nil(t, err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "Interrupted", phaseInfo.Err().Code) - assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + assert.Equal(t, "Interrupted", phaseInfo.Err().GetCode()) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().GetKind()) }) t.Run("GKE kubelet graceful node shutdown", func(t *testing.T) { @@ -1649,9 +1650,9 @@ func TestDemystifyFailure(t *testing.T) { }, pluginsCore.TaskInfo{}) assert.Nil(t, err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "Interrupted", phaseInfo.Err().Code) - assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().Kind) - assert.Contains(t, phaseInfo.Err().Message, containerReason) + assert.Equal(t, "Interrupted", phaseInfo.Err().GetCode()) + assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().GetKind()) + assert.Contains(t, phaseInfo.Err().GetMessage(), containerReason) }) t.Run("GKE kubelet graceful node shutdown", func(t *testing.T) { @@ -1672,9 +1673,9 @@ func TestDemystifyFailure(t *testing.T) { }, pluginsCore.TaskInfo{}) assert.Nil(t, err) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "Interrupted", phaseInfo.Err().Code) - assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().Kind) - assert.Contains(t, phaseInfo.Err().Message, containerReason) + assert.Equal(t, "Interrupted", phaseInfo.Err().GetCode()) + assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().GetKind()) + assert.Contains(t, phaseInfo.Err().GetMessage(), containerReason) }) } @@ -1705,8 +1706,8 @@ func TestDemystifyPending_testcases(t *testing.T) { assert.NotNil(t, p) assert.Equal(t, p.Phase(), pluginsCore.PhaseRetryableFailure) if assert.NotNil(t, p.Err()) { - assert.Equal(t, p.Err().Code, tt.errCode) - assert.Equal(t, p.Err().Message, tt.message) + assert.Equal(t, p.Err().GetCode(), tt.errCode) + assert.Equal(t, p.Err().GetMessage(), tt.message) } } } @@ -1765,8 +1766,8 @@ func TestDeterminePrimaryContainerPhase(t *testing.T) { }, }, info) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, "foo", phaseInfo.Err().Code) - assert.Equal(t, "\r\n[primary] terminated with exit code (1). Reason [foo]. Message: \nfoo failed.", phaseInfo.Err().Message) + assert.Equal(t, "foo", phaseInfo.Err().GetCode()) + assert.Equal(t, "\r\n[primary] terminated with exit code (1). Reason [foo]. 
Message: \nfoo failed.", phaseInfo.Err().GetMessage()) }) t.Run("primary container succeeded", func(t *testing.T) { phaseInfo := DeterminePrimaryContainerPhase(primaryContainerName, []v1.ContainerStatus{ @@ -1786,8 +1787,8 @@ func TestDeterminePrimaryContainerPhase(t *testing.T) { secondaryContainer, }, info) assert.Equal(t, pluginsCore.PhasePermanentFailure, phaseInfo.Phase()) - assert.Equal(t, PrimaryContainerNotFound, phaseInfo.Err().Code) - assert.Equal(t, "Primary container [primary] not found in pod's container statuses", phaseInfo.Err().Message) + assert.Equal(t, PrimaryContainerNotFound, phaseInfo.Err().GetCode()) + assert.Equal(t, "Primary container [primary] not found in pod's container statuses", phaseInfo.Err().GetMessage()) }) t.Run("primary container failed with OOMKilled", func(t *testing.T) { phaseInfo := DeterminePrimaryContainerPhase(primaryContainerName, []v1.ContainerStatus{ @@ -1803,8 +1804,8 @@ func TestDeterminePrimaryContainerPhase(t *testing.T) { }, }, info) assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) - assert.Equal(t, OOMKilled, phaseInfo.Err().Code) - assert.Equal(t, "\r\n[primary] terminated with exit code (0). Reason [OOMKilled]. Message: \nfoo failed.", phaseInfo.Err().Message) + assert.Equal(t, OOMKilled, phaseInfo.Err().GetCode()) + assert.Equal(t, "\r\n[primary] terminated with exit code (0). Reason [OOMKilled]. Message: \nfoo failed.", phaseInfo.Err().GetMessage()) }) } @@ -2244,3 +2245,112 @@ func TestAddFlyteCustomizationsToContainer_SetConsoleUrl(t *testing.T) { }) } } + +func TestAddTolerationsForExtendedResources(t *testing.T) { + gpuResourceName := v1.ResourceName("nvidia.com/gpu") + addTolerationResourceName := v1.ResourceName("foo/bar") + noTolerationResourceName := v1.ResourceName("foo/baz") + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: gpuResourceName, + AddTolerationsForExtendedResources: []string{ + gpuResourceName.String(), + addTolerationResourceName.String(), + }, + })) + + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + gpuResourceName: resource.MustParse("1"), + addTolerationResourceName: resource.MustParse("1"), + noTolerationResourceName: resource.MustParse("1"), + }, + }, + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, + } + + podSpec = AddTolerationsForExtendedResources(podSpec) + fmt.Printf("%v\n", podSpec.Tolerations) + assert.Equal(t, 3, len(podSpec.Tolerations)) + assert.Equal(t, addTolerationResourceName.String(), podSpec.Tolerations[1].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[1].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[1].Effect) + assert.Equal(t, gpuResourceName.String(), podSpec.Tolerations[2].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[2].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[2].Effect) + + podSpec = &v1.PodSpec{ + InitContainers: []v1.Container{ + v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + gpuResourceName: resource.MustParse("1"), + addTolerationResourceName: resource.MustParse("1"), + noTolerationResourceName: resource.MustParse("1"), + }, + }, + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, + } + + podSpec = 
AddTolerationsForExtendedResources(podSpec) + assert.Equal(t, 3, len(podSpec.Tolerations)) + assert.Equal(t, addTolerationResourceName.String(), podSpec.Tolerations[1].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[1].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[1].Effect) + assert.Equal(t, gpuResourceName.String(), podSpec.Tolerations[2].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[2].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[2].Effect) + + podSpec = &v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + gpuResourceName: resource.MustParse("1"), + addTolerationResourceName: resource.MustParse("1"), + noTolerationResourceName: resource.MustParse("1"), + }, + }, + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: gpuResourceName.String(), + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, + } + + podSpec = AddTolerationsForExtendedResources(podSpec) + assert.Equal(t, 3, len(podSpec.Tolerations)) + assert.Equal(t, gpuResourceName.String(), podSpec.Tolerations[1].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[1].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[1].Effect) + assert.Equal(t, addTolerationResourceName.String(), podSpec.Tolerations[2].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[2].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[2].Effect) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go index ef7807aadd..fab4f84997 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go @@ -12,7 +12,7 @@ import ( func ToK8sEnvVar(env []*core.KeyValuePair) []v1.EnvVar { envVars := make([]v1.EnvVar, 0, len(env)) for _, kv := range env { - envVars = append(envVars, v1.EnvVar{Name: kv.Key, Value: kv.Value}) + envVars = append(envVars, v1.EnvVar{Name: kv.GetKey(), Value: kv.GetValue()}) } return envVars } @@ -22,12 +22,12 @@ func ToK8sEnvVar(env []*core.KeyValuePair) []v1.EnvVar { func ToK8sResourceList(resources []*core.Resources_ResourceEntry) (v1.ResourceList, error) { k8sResources := make(v1.ResourceList, len(resources)) for _, r := range resources { - rVal := r.Value + rVal := r.GetValue() v, err := resource.ParseQuantity(rVal) if err != nil { return nil, errors.Wrap(err, "Failed to parse resource as a valid quantity.") } - switch r.Name { + switch r.GetName() { case core.Resources_CPU: if !v.IsZero() { k8sResources[v1.ResourceCPU] = v @@ -54,11 +54,11 @@ func ToK8sResourceRequirements(resources *core.Resources) (*v1.ResourceRequireme if resources == nil { return res, nil } - req, err := ToK8sResourceList(resources.Requests) + req, err := ToK8sResourceList(resources.GetRequests()) if err != nil { return res, err } - lim, err := ToK8sResourceList(resources.Limits) + lim, err := ToK8sResourceList(resources.GetLimits()) if err != nil { return res, err } diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go index 7569abd90e..b9efcd7372 100644 --- a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go 
@@ -179,7 +179,7 @@ func NewResourceCache(ctx context.Context, name string, client Client, cfg webap workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(rateCfg.QPS), rateCfg.Burst)}, - ), cfg.ResyncInterval.Duration, cfg.Workers, cfg.Size, + ), cfg.ResyncInterval.Duration, uint(cfg.Workers), uint(cfg.Size), // #nosec G115 scope.NewSubScope("cache")) if err != nil { diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/error_reader.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/error_reader.go new file mode 100644 index 0000000000..161e863f49 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/error_reader.go @@ -0,0 +1,93 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + io "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io" + mock "github.com/stretchr/testify/mock" +) + +// ErrorReader is an autogenerated mock type for the ErrorReader type +type ErrorReader struct { + mock.Mock +} + +type ErrorReader_IsError struct { + *mock.Call +} + +func (_m ErrorReader_IsError) Return(_a0 bool, _a1 error) *ErrorReader_IsError { + return &ErrorReader_IsError{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ErrorReader) OnIsError(ctx context.Context) *ErrorReader_IsError { + c_call := _m.On("IsError", ctx) + return &ErrorReader_IsError{Call: c_call} +} + +func (_m *ErrorReader) OnIsErrorMatch(matchers ...interface{}) *ErrorReader_IsError { + c_call := _m.On("IsError", matchers...) + return &ErrorReader_IsError{Call: c_call} +} + +// IsError provides a mock function with given fields: ctx +func (_m *ErrorReader) IsError(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ErrorReader_ReadError struct { + *mock.Call +} + +func (_m ErrorReader_ReadError) Return(_a0 io.ExecutionError, _a1 error) *ErrorReader_ReadError { + return &ErrorReader_ReadError{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ErrorReader) OnReadError(ctx context.Context) *ErrorReader_ReadError { + c_call := _m.On("ReadError", ctx) + return &ErrorReader_ReadError{Call: c_call} +} + +func (_m *ErrorReader) OnReadErrorMatch(matchers ...interface{}) *ErrorReader_ReadError { + c_call := _m.On("ReadError", matchers...) 
+ return &ErrorReader_ReadError{Call: c_call} +} + +// ReadError provides a mock function with given fields: ctx +func (_m *ErrorReader) ReadError(ctx context.Context) (io.ExecutionError, error) { + ret := _m.Called(ctx) + + var r0 io.ExecutionError + if rf, ok := ret.Get(0).(func(context.Context) io.ExecutionError); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(io.ExecutionError) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go index ad82fca8a3..52b58b732d 100644 --- a/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go @@ -39,7 +39,7 @@ func TestInMemoryOutputReader(t *testing.T) { assert.NoError(t, err) literalMap, executionErr, err := or.Read(ctx) - assert.Equal(t, lt, literalMap.Literals) + assert.Equal(t, lt, literalMap.GetLiterals()) assert.Nil(t, executionErr) assert.NoError(t, err) } diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go index 2a20272f6e..909d1fedfa 100644 --- a/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go @@ -68,6 +68,6 @@ func NewBase36PrefixShardSelector(ctx context.Context) (ShardSelector, error) { func NewConstantShardSelector(shards []string) ShardSelector { return &PrecomputedShardSelector{ precomputedPrefixes: shards, - buckets: uint32(len(shards)), + buckets: uint32(len(shards)), // #nosec G115 } } diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go index ae880f3640..c28f5ac94d 100644 --- a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go @@ -105,7 +105,7 @@ func (s *singleFileErrorReader) IsError(ctx context.Context) (bool, error) { } func errorDoc2ExecutionError(errorDoc *core.ErrorDocument, errorFilePath storage.DataReference) io.ExecutionError { - if errorDoc.Error == nil { + if errorDoc.GetError() == nil { return io.ExecutionError{ IsRecoverable: true, ExecutionError: &core.ExecutionError{ @@ -117,15 +117,15 @@ func errorDoc2ExecutionError(errorDoc *core.ErrorDocument, errorFilePath storage } executionError := io.ExecutionError{ ExecutionError: &core.ExecutionError{ - Code: errorDoc.Error.Code, - Message: errorDoc.Error.Message, - Kind: errorDoc.Error.Origin, - Timestamp: errorDoc.Error.Timestamp, - Worker: errorDoc.Error.Worker, + Code: errorDoc.GetError().GetCode(), + Message: errorDoc.GetError().GetMessage(), + Kind: errorDoc.GetError().GetOrigin(), + Timestamp: errorDoc.GetError().GetTimestamp(), + Worker: errorDoc.GetError().GetWorker(), }, } - if errorDoc.Error.Kind == core.ContainerError_RECOVERABLE { + if errorDoc.GetError().GetKind() == core.ContainerError_RECOVERABLE { executionError.IsRecoverable = true } @@ -201,7 +201,7 @@ func (e *earliestFileErrorReader) ReadError(ctx context.Context) (io.ExecutionEr if err != nil { return io.ExecutionError{}, errors.Wrapf(err, "failed to read error file @[%s]", errorFilePath.String()) } 
- timestamp := errorDoc.Error.GetTimestamp().AsTime() + timestamp := errorDoc.GetError().GetTimestamp().AsTime() if earliestTimestamp == nil || earliestTimestamp.After(timestamp) { earliestExecutionError = errorDoc2ExecutionError(errorDoc, errorFilePath) earliestTimestamp = ×tamp diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go index 1cd7099f78..b2dbb0ba55 100644 --- a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go @@ -91,7 +91,7 @@ func TestReadOrigin(t *testing.T) { incomingErrorDoc := args.Get(2) assert.NotNil(t, incomingErrorDoc) casted := incomingErrorDoc.(*core.ErrorDocument) - casted.Error = errorDoc.Error + casted.Error = errorDoc.GetError() }).Return(nil) store.OnHead(ctx, storage.DataReference("deck.html")).Return(MemoryMetadata{ @@ -129,7 +129,7 @@ func TestReadOrigin(t *testing.T) { incomingErrorDoc := args.Get(2) assert.NotNil(t, incomingErrorDoc) casted := incomingErrorDoc.(*core.ErrorDocument) - casted.Error = errorDoc.Error + casted.Error = errorDoc.GetError() }).Return(nil) maxPayloadSize := int64(0) @@ -168,21 +168,21 @@ func TestReadOrigin(t *testing.T) { incomingErrorDoc := args.Get(2) assert.NotNil(t, incomingErrorDoc) casted := incomingErrorDoc.(*core.ErrorDocument) - casted.Error = errorDoc.Error + casted.Error = errorDoc.GetError() }).Return(nil) store.OnList(ctx, storage.DataReference("s3://errors/error"), 1000, storage.NewCursorAtStart()).Return( - []storage.DataReference{"error-0.pb", "error-1.pb", "error-2.pb"}, storage.NewCursorAtEnd(), nil) + []storage.DataReference{"s3://errors/error-0.pb", "s3://errors/error-1.pb", "s3://errors/error-2.pb"}, storage.NewCursorAtEnd(), nil) - store.OnHead(ctx, storage.DataReference("error-0.pb")).Return(MemoryMetadata{ + store.OnHead(ctx, storage.DataReference("s3://errors/error-0.pb")).Return(MemoryMetadata{ exists: true, }, nil) - store.OnHead(ctx, storage.DataReference("error-1.pb")).Return(MemoryMetadata{ + store.OnHead(ctx, storage.DataReference("s3://errors/error-1.pb")).Return(MemoryMetadata{ exists: true, }, nil) - store.OnHead(ctx, storage.DataReference("error-2.pb")).Return(MemoryMetadata{ + store.OnHead(ctx, storage.DataReference("s3://errors/error-2.pb")).Return(MemoryMetadata{ exists: true, }, nil) @@ -227,13 +227,13 @@ func TestReadOrigin(t *testing.T) { incomingErrorDoc := args.Get(2) assert.NotNil(t, incomingErrorDoc) casted := incomingErrorDoc.(*core.ErrorDocument) - casted.Error = errorDoc.Error + casted.Error = errorDoc.GetError() }).Return(nil) store.OnList(ctx, storage.DataReference("s3://errors/error"), 1000, storage.NewCursorAtStart()).Return( - []storage.DataReference{"error.pb"}, storage.NewCursorAtEnd(), nil) + []storage.DataReference{"s3://errors/error.pb"}, storage.NewCursorAtEnd(), nil) - store.OnHead(ctx, storage.DataReference("error.pb")).Return(MemoryMetadata{ + store.OnHead(ctx, storage.DataReference("s3://errors/error.pb")).Return(MemoryMetadata{ exists: true, }, nil) diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go index 19aae6ba7c..e8b7a4abed 100644 --- a/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go +++ b/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go @@ -120,44 +120,44 @@ func (input Input) templateVars() []TemplateVar { }, TemplateVar{ 
defaultRegexes.TaskRetryAttempt, - strconv.FormatUint(uint64(taskExecutionIdentifier.RetryAttempt), 10), + strconv.FormatUint(uint64(taskExecutionIdentifier.GetRetryAttempt()), 10), }, ) - if taskExecutionIdentifier.TaskId != nil { + if taskExecutionIdentifier.GetTaskId() != nil { vars = append( vars, TemplateVar{ defaultRegexes.TaskID, - taskExecutionIdentifier.TaskId.Name, + taskExecutionIdentifier.GetTaskId().GetName(), }, TemplateVar{ defaultRegexes.TaskVersion, - taskExecutionIdentifier.TaskId.Version, + taskExecutionIdentifier.GetTaskId().GetVersion(), }, TemplateVar{ defaultRegexes.TaskProject, - taskExecutionIdentifier.TaskId.Project, + taskExecutionIdentifier.GetTaskId().GetProject(), }, TemplateVar{ defaultRegexes.TaskDomain, - taskExecutionIdentifier.TaskId.Domain, + taskExecutionIdentifier.GetTaskId().GetDomain(), }, ) } - if taskExecutionIdentifier.NodeExecutionId != nil && taskExecutionIdentifier.NodeExecutionId.ExecutionId != nil { + if taskExecutionIdentifier.GetNodeExecutionId() != nil && taskExecutionIdentifier.GetNodeExecutionId().GetExecutionId() != nil { vars = append( vars, TemplateVar{ defaultRegexes.ExecutionName, - taskExecutionIdentifier.NodeExecutionId.ExecutionId.Name, + taskExecutionIdentifier.GetNodeExecutionId().GetExecutionId().GetName(), }, TemplateVar{ defaultRegexes.ExecutionProject, - taskExecutionIdentifier.NodeExecutionId.ExecutionId.Project, + taskExecutionIdentifier.GetNodeExecutionId().GetExecutionId().GetProject(), }, TemplateVar{ defaultRegexes.ExecutionDomain, - taskExecutionIdentifier.NodeExecutionId.ExecutionId.Domain, + taskExecutionIdentifier.GetNodeExecutionId().GetExecutionId().GetDomain(), }, ) } @@ -219,9 +219,11 @@ func (p TemplateLogPlugin) GetTaskLogs(input Input) (Output, error) { } } taskLogs = append(taskLogs, &core.TaskLog{ - Uri: replaceAll(dynamicTemplateURI, templateVars), - Name: p.DisplayName + input.LogName, - MessageFormat: p.MessageFormat, + Uri: replaceAll(dynamicTemplateURI, templateVars), + Name: p.DisplayName + input.LogName, + MessageFormat: p.MessageFormat, + ShowWhilePending: p.ShowWhilePending, + HideOnceFinished: p.HideOnceFinished, }) } } diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/template_test.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/template_test.go index 42226bd7c0..2e885732f8 100644 --- a/flyteplugins/go/tasks/pluginmachinery/tasklog/template_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/tasklog/template_test.go @@ -478,6 +478,37 @@ func TestTemplateLogPlugin(t *testing.T) { }, }, }, + { + "flyteinteractive", + TemplateLogPlugin{ + Name: "vscode", + DynamicTemplateURIs: []TemplateURI{"vscode://flyteinteractive:{{ .taskConfig.port }}/{{ .podName }}"}, + MessageFormat: core.TaskLog_JSON, + HideOnceFinished: true, + ShowWhilePending: true, + }, + args{ + input: Input{ + PodName: "my-pod-name", + TaskTemplate: &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "vscode", + "port": "1234", + }, + }, + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: "vscode://flyteinteractive:1234/my-pod-name", + MessageFormat: core.TaskLog_JSON, + ShowWhilePending: true, + HideOnceFinished: true, + }, + }, + }, + }, { "flyteinteractive - no link_type in task template", TemplateLogPlugin{ diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go b/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go index b6ea59020b..91423a9929 100644 --- a/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go +++ 
b/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go @@ -53,8 +53,8 @@ func unmarshalSecret(encoded string) (*core.Secret, error) { func MarshalSecretsToMapStrings(secrets []*core.Secret) (map[string]string, error) { res := make(map[string]string, len(secrets)) for index, s := range secrets { - if _, found := core.Secret_MountType_name[int32(s.MountRequirement)]; !found { - return nil, fmt.Errorf("invalid mount requirement [%v]", s.MountRequirement) + if _, found := core.Secret_MountType_name[int32(s.GetMountRequirement())]; !found { + return nil, fmt.Errorf("invalid mount requirement [%v]", s.GetMountRequirement()) } encodedSecret := marshalSecret(s) diff --git a/flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go b/flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go index fce1acde89..cf5fc8e451 100644 --- a/flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go +++ b/flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go @@ -227,6 +227,7 @@ func (q *queue) Start(ctx context.Context) error { wrapper.retryCount++ wrapper.err = err + // #nosec G115 if wrapper.retryCount >= uint(q.maxRetries) { logger.Debugf(ctx, "WorkItem [%v] exhausted all retries. Last Error: %v.", wrapper.ID(), err) diff --git a/flyteplugins/go/tasks/plugins/array/arraystatus/status_test.go b/flyteplugins/go/tasks/plugins/array/arraystatus/status_test.go index 96aabcfcc6..a9ebea2825 100644 --- a/flyteplugins/go/tasks/plugins/array/arraystatus/status_test.go +++ b/flyteplugins/go/tasks/plugins/array/arraystatus/status_test.go @@ -29,7 +29,7 @@ func TestArrayStatus_HashCode(t *testing.T) { }) t.Run("Populated Equal", func(t *testing.T) { - expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) + expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115 assert.Nil(t, err) expected := ArrayStatus{ Detailed: expectedDetailed, @@ -37,7 +37,7 @@ func TestArrayStatus_HashCode(t *testing.T) { expectedHashCode, err := expected.HashCode() assert.Nil(t, err) - actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) + actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115 assert.Nil(t, err) actual := ArrayStatus{ Detailed: actualDetailed, @@ -49,7 +49,7 @@ func TestArrayStatus_HashCode(t *testing.T) { }) t.Run("Updated Not Equal", func(t *testing.T) { - expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) + expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115 assert.Nil(t, err) expectedDetailed.SetItem(0, uint64(1)) expected := ArrayStatus{ @@ -58,7 +58,7 @@ func TestArrayStatus_HashCode(t *testing.T) { expectedHashCode, err := expected.HashCode() assert.Nil(t, err) - actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) + actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115 assert.Nil(t, err) actual := ArrayStatus{ Detailed: actualDetailed, @@ -70,7 +70,7 @@ func TestArrayStatus_HashCode(t *testing.T) { }) t.Run("Updated Equal", func(t *testing.T) { - expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) + expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115 assert.Nil(t, err) expectedDetailed.SetItem(0, uint64(1)) expected := ArrayStatus{ @@ -79,7 +79,7 @@ func 
TestArrayStatus_HashCode(t *testing.T) { expectedHashCode, err := expected.HashCode() assert.Nil(t, err) - actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) + actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115 actualDetailed.SetItem(0, uint64(1)) assert.Nil(t, err) actual := ArrayStatus{ diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/client_test.go b/flyteplugins/go/tasks/plugins/array/awsbatch/client_test.go index fe35f74e2a..e135aee020 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/client_test.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/client_test.go @@ -29,7 +29,7 @@ func TestClient_SubmitJob(t *testing.T) { c := NewCustomBatchClient(mocks.NewMockAwsBatchClient(), "account-id", "test-region", rateLimiter, rateLimiter).(*client) store, err := NewJobStore(ctx, c, config.JobStoreConfig{ CacheSize: 1, - Parallelizm: 1, + Parallelism: 1, BatchChunkSize: 1, ResyncPeriod: stdConfig.Duration{Duration: 1000}, }, EventHandler{}, promutils.NewTestScope()) diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config.go b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config.go index 7b8a484140..7815c23ff5 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config.go @@ -30,7 +30,7 @@ type Config struct { type JobStoreConfig struct { CacheSize int `json:"jacheSize" pflag:",Maximum informer cache size as number of items. Caches are used as an optimization to lessen the load on AWS Services."` - Parallelizm int `json:"parallelizm"` + Parallelism int `json:"parallelism"` BatchChunkSize int `json:"batchChunkSize" pflag:",Determines the size of each batch sent to GetJobDetails api."` ResyncPeriod config.Duration `json:"resyncPeriod" pflag:",Defines the duration for syncing job details from AWS Batch."` } @@ -39,7 +39,7 @@ var ( defaultConfig = &Config{ JobStoreConfig: JobStoreConfig{ CacheSize: 10000, - Parallelizm: 20, + Parallelism: 20, BatchChunkSize: 100, ResyncPeriod: config.Duration{Duration: 30 * time.Second}, }, diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags.go b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags.go index a4cb6b3b0c..5b482fb99d 100755 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags.go @@ -51,7 +51,7 @@ func (Config) mustMarshalJSON(v json.Marshaler) string { func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.jacheSize"), defaultConfig.JobStoreConfig.CacheSize, "Maximum informer cache size as number of items. 
Caches are used as an optimization to lessen the load on AWS Services.") - cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.parallelizm"), defaultConfig.JobStoreConfig.Parallelizm, "") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.parallelism"), defaultConfig.JobStoreConfig.Parallelism, "") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.batchChunkSize"), defaultConfig.JobStoreConfig.BatchChunkSize, "Determines the size of each batch sent to GetJobDetails api.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.resyncPeriod"), defaultConfig.JobStoreConfig.ResyncPeriod.String(), "Defines the duration for syncing job details from AWS Batch.") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "defCacheSize"), defaultConfig.JobDefCacheSize, "Maximum job definition cache size as number of items. Caches are used as an optimization to lessen the load on AWS Services.") diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags_test.go b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags_test.go index 62d8dc5ac2..9d06838911 100755 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags_test.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags_test.go @@ -113,14 +113,14 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) - t.Run("Test_jobStoreConfig.parallelizm", func(t *testing.T) { + t.Run("Test_jobStoreConfig.parallelism", func(t *testing.T) { t.Run("Override", func(t *testing.T) { testValue := "1" - cmdFlags.Set("jobStoreConfig.parallelizm", testValue) - if vInt, err := cmdFlags.GetInt("jobStoreConfig.parallelizm"); err == nil { - testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.JobStoreConfig.Parallelizm) + cmdFlags.Set("jobStoreConfig.parallelism", testValue) + if vInt, err := cmdFlags.GetInt("jobStoreConfig.parallelism"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.JobStoreConfig.Parallelism) } else { assert.FailNow(t, err.Error()) diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/job_config.go b/flyteplugins/go/tasks/plugins/array/awsbatch/job_config.go index 1ca10cb39d..2831dd28ae 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/job_config.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/job_config.go @@ -41,7 +41,7 @@ func (j *JobConfig) setKeyIfKnown(key, value string) bool { func (j *JobConfig) MergeFromKeyValuePairs(pairs []*core.KeyValuePair) *JobConfig { for _, entry := range pairs { - j.setKeyIfKnown(entry.Key, entry.Value) + j.setKeyIfKnown(entry.GetKey(), entry.GetValue()) } return j diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/job_definition.go b/flyteplugins/go/tasks/plugins/array/awsbatch/job_definition.go index 1ef9e4ec5b..acd5f124dd 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/job_definition.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/job_definition.go @@ -19,8 +19,8 @@ const defaultComputeEngine = "EC2" const platformCapabilitiesConfigKey = "platformCapabilities" func getContainerImage(_ context.Context, task *core.TaskTemplate) string { - if task.GetContainer() != nil && len(task.GetContainer().Image) > 0 { - return task.GetContainer().Image + if task.GetContainer() != nil && len(task.GetContainer().GetImage()) > 0 { + return task.GetContainer().GetImage() } return "" diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store.go b/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store.go index 16d44b490e..06a1d7d155 100644 --- 
a/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store.go @@ -372,7 +372,7 @@ func NewJobStore(ctx context.Context, batchClient Client, cfg config.JobStoreCon autoCache, err := cache.NewAutoRefreshBatchedCache("aws-batch-jobs", batchJobsForSync(ctx, cfg.BatchChunkSize), syncBatches(ctx, store, handler, cfg.BatchChunkSize), workqueue.DefaultControllerRateLimiter(), cfg.ResyncPeriod.Duration, - cfg.Parallelizm, cfg.CacheSize, scope) + uint(cfg.Parallelism), uint(cfg.CacheSize), scope) // #nosec G115 store.AutoRefresh = autoCache return store, err diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store_test.go b/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store_test.go index 8196925e15..122d03c71a 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store_test.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store_test.go @@ -35,7 +35,7 @@ func newJobsStore(t testing.TB, batchClient Client) *JobStore { func newJobsStoreWithSize(t testing.TB, batchClient Client, size int) *JobStore { store, err := NewJobStore(context.TODO(), batchClient, config.JobStoreConfig{ CacheSize: size, - Parallelizm: 1, + Parallelism: 1, BatchChunkSize: 2, ResyncPeriod: config2.Duration{Duration: 1000}, }, EventHandler{}, promutils.NewTestScope()) diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/launcher.go b/flyteplugins/go/tasks/plugins/array/awsbatch/launcher.go index 609bab6cf7..d42c5ea0fe 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/launcher.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/launcher.go @@ -33,8 +33,8 @@ func LaunchSubTasks(ctx context.Context, tCtx core.TaskExecutionContext, batchCl } // If the original job was marked as an array (not a single job), then make sure to set it up correctly. - if t.Type == arrayTaskType { - logger.Debugf(ctx, "Task is of type [%v]. Will setup task index env vars.", t.Type) + if t.GetType() == arrayTaskType { + logger.Debugf(ctx, "Task is of type [%v]. Will setup task index env vars.", t.GetType()) batchInput = UpdateBatchInputForArray(ctx, batchInput, int64(size)) } @@ -46,7 +46,7 @@ func LaunchSubTasks(ctx context.Context, tCtx core.TaskExecutionContext, batchCl metrics.SubTasksSubmitted.Add(ctx, float64(size)) - retryAttemptsArray, err := bitarray.NewCompactArray(uint(size), bitarray.Item(pluginConfig.MaxRetries)) + retryAttemptsArray, err := bitarray.NewCompactArray(uint(size), bitarray.Item(pluginConfig.MaxRetries)) // #nosec G115 if err != nil { logger.Errorf(context.Background(), "Failed to create attempts compact array with [count: %v, maxValue: %v]", size, pluginConfig.MaxRetries) return nil, err @@ -58,7 +58,7 @@ func LaunchSubTasks(ctx context.Context, tCtx core.TaskExecutionContext, batchCl Summary: arraystatus.ArraySummary{ core.PhaseQueued: int64(size), }, - Detailed: arrayCore.NewPhasesCompactArray(uint(size)), + Detailed: arrayCore.NewPhasesCompactArray(uint(size)), // #nosec G115 }). SetReason("Successfully launched subtasks."). 
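Note on the // #nosec G115 annotations added above and throughout the files that follow: gosec rule G115 flags integer conversions that can overflow or change sign, such as int to uint or uint64 to uint32. The standalone snippet below (not part of this change) shows the failure mode the rule warns about; the annotations in this diff assume the converted values are non-negative array sizes and retry counts that fit comfortably in the target types.

package main

import "fmt"

// Illustration of the conversions gosec G115 flags: a negative int wraps around
// when converted to uint, and high bits are dropped when narrowing to uint32.
func main() {
	n := -1
	fmt.Println(uint(n)) // 18446744073709551615 on 64-bit platforms
	big := int64(1) << 40
	fmt.Println(uint32(big)) // 0, the upper bits are silently truncated
}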
SetRetryAttempts(retryAttemptsArray) diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/monitor.go b/flyteplugins/go/tasks/plugins/array/awsbatch/monitor.go index 62bc5103dc..d5c05f6cd0 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/monitor.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/monitor.go @@ -45,7 +45,7 @@ func CheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionContext, job } else if taskTemplate == nil { return nil, errors.Errorf(errors.BadTaskSpecification, "Required value not set, taskTemplate is nil") } - retry := toRetryStrategy(ctx, toBackoffLimit(taskTemplate.Metadata), cfg.MinRetries, cfg.MaxRetries) + retry := toRetryStrategy(ctx, toBackoffLimit(taskTemplate.GetMetadata()), cfg.MinRetries, cfg.MaxRetries) // If job isn't currently being monitored (recovering from a restart?), add it to the sync-cache and return if job == nil { @@ -67,7 +67,7 @@ func CheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionContext, job msg := errorcollector.NewErrorMessageCollector() newArrayStatus := arraystatus.ArrayStatus{ Summary: arraystatus.ArraySummary{}, - Detailed: arrayCore.NewPhasesCompactArray(uint(currentState.GetExecutionArraySize())), + Detailed: arrayCore.NewPhasesCompactArray(uint(currentState.GetExecutionArraySize())), // #nosec G115 } currentSubTaskPhaseHash, err := currentState.GetArrayStatus().HashCode() @@ -126,7 +126,7 @@ func CheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionContext, job } } - newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(actualPhase)) + newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(actualPhase)) // #nosec G115 newArrayStatus.Summary.Inc(actualPhase) parentState.RetryAttempts.SetItem(childIdx, bitarray.Item(len(subJob.Attempts))) } diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/task_links.go b/flyteplugins/go/tasks/plugins/array/awsbatch/task_links.go index caf2e51a38..64b64ac168 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/task_links.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/task_links.go @@ -89,9 +89,9 @@ func GetTaskLinks(ctx context.Context, taskMeta pluginCore.TaskExecutionMetadata externalResources = append(externalResources, &pluginCore.ExternalResource{ ExternalID: subJob.ID, - Index: uint32(originalIndex), + Index: uint32(originalIndex), // #nosec G115 Logs: subTaskLogLinks, - RetryAttempt: uint32(retryAttempt), + RetryAttempt: uint32(retryAttempt), // #nosec G115 Phase: finalPhase, }) } diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/transformer.go b/flyteplugins/go/tasks/plugins/array/awsbatch/transformer.go index 1eaef150d0..936269f2b1 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/transformer.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/transformer.go @@ -109,9 +109,9 @@ func FlyteTaskToBatchInput(ctx context.Context, tCtx pluginCore.TaskExecutionCon } submitJobInput.SetJobName(tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName()). SetJobDefinition(jobDefinition).SetJobQueue(jobConfig.DynamicTaskQueue). - SetRetryStrategy(toRetryStrategy(ctx, toBackoffLimit(taskTemplate.Metadata), cfg.MinRetries, cfg.MaxRetries)). + SetRetryStrategy(toRetryStrategy(ctx, toBackoffLimit(taskTemplate.GetMetadata()), cfg.MinRetries, cfg.MaxRetries)). SetContainerOverrides(toContainerOverrides(ctx, append(cmd, args...), &resources, envVars)). 
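Note: the SetTimeout change just below, like most of the mechanical edits in this diff, replaces direct protobuf field access (taskTemplate.Metadata) with the generated accessors (taskTemplate.GetMetadata()). The generated getters check for a nil receiver and return a zero value, so long accessor chains cannot panic on unset intermediate messages. A minimal sketch of that behavior, using hand-written stand-ins rather than the real flyteidl types:

package main

import "fmt"

// Toy stand-ins for generated protobuf messages; each getter tolerates a nil
// receiver, which is what makes chained access safe.
type Duration struct{ Seconds int64 }

func (d *Duration) GetSeconds() int64 {
	if d == nil {
		return 0
	}
	return d.Seconds
}

type TaskMetadata struct{ Timeout *Duration }

func (m *TaskMetadata) GetTimeout() *Duration {
	if m == nil {
		return nil
	}
	return m.Timeout
}

type TaskTemplate struct{ Metadata *TaskMetadata }

func (t *TaskTemplate) GetMetadata() *TaskMetadata {
	if t == nil {
		return nil
	}
	return t.Metadata
}

func main() {
	tmpl := &TaskTemplate{} // Metadata is unset
	fmt.Println(tmpl.GetMetadata().GetTimeout().GetSeconds()) // prints 0, no panic
	// tmpl.Metadata.Timeout.Seconds would dereference nil and panic here.
}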
- SetTimeout(toTimeout(taskTemplate.Metadata.GetTimeout(), cfg.DefaultTimeOut.Duration)) + SetTimeout(toTimeout(taskTemplate.GetMetadata().GetTimeout(), cfg.DefaultTimeOut.Duration)) return submitJobInput, nil } @@ -159,7 +159,7 @@ func getEnvVarsForTask(ctx context.Context, execID pluginCore.TaskExecutionID, c } func toTimeout(templateTimeout *duration.Duration, defaultTimeout time.Duration) *batch.JobTimeout { - if templateTimeout != nil && templateTimeout.Seconds > 0 { + if templateTimeout != nil && templateTimeout.GetSeconds() > 0 { return (&batch.JobTimeout{}).SetAttemptDurationSeconds(templateTimeout.GetSeconds()) } @@ -239,11 +239,11 @@ func toRetryStrategy(_ context.Context, backoffLimit *int32, minRetryAttempts, m } func toBackoffLimit(metadata *idlCore.TaskMetadata) *int32 { - if metadata == nil || metadata.Retries == nil { + if metadata == nil || metadata.GetRetries() == nil { return nil } - i := int32(metadata.Retries.Retries) + i := int32(metadata.GetRetries().GetRetries()) // #nosec G115 return &i } diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/transformer_test.go b/flyteplugins/go/tasks/plugins/array/awsbatch/transformer_test.go index bbe8c88995..642493346e 100644 --- a/flyteplugins/go/tasks/plugins/array/awsbatch/transformer_test.go +++ b/flyteplugins/go/tasks/plugins/array/awsbatch/transformer_test.go @@ -198,7 +198,7 @@ func TestArrayJobToBatchInput(t *testing.T) { batchInput, err := FlyteTaskToBatchInput(ctx, taskCtx, "", &config.Config{}) assert.NoError(t, err) - batchInput = UpdateBatchInputForArray(ctx, batchInput, input.Size) + batchInput = UpdateBatchInputForArray(ctx, batchInput, input.GetSize()) assert.NotNil(t, batchInput) assert.Equal(t, *expectedBatchInput, *batchInput) diff --git a/flyteplugins/go/tasks/plugins/array/catalog.go b/flyteplugins/go/tasks/plugins/array/catalog.go index d6bf5e8820..60b4b224ac 100644 --- a/flyteplugins/go/tasks/plugins/array/catalog.go +++ b/flyteplugins/go/tasks/plugins/array/catalog.go @@ -39,7 +39,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex // Extract the custom plugin pb var arrayJob *idlPlugins.ArrayJob - if taskTemplate.Type == AwsBatchTaskType { + if taskTemplate.GetType() == AwsBatchTaskType { arrayJob = &idlPlugins.ArrayJob{ Parallelism: 1, Size: 1, @@ -48,7 +48,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex }, } } else { - arrayJob, err = arrayCore.ToArrayJob(taskTemplate.GetCustom(), taskTemplate.TaskTypeVersion) + arrayJob, err = arrayCore.ToArrayJob(taskTemplate.GetCustom(), taskTemplate.GetTaskTypeVersion()) } if err != nil { return state, err @@ -58,9 +58,9 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex var inputReaders []io.InputReader // Save this in the state - if taskTemplate.TaskTypeVersion == 0 { - state = state.SetOriginalArraySize(arrayJob.Size) - arrayJobSize = arrayJob.Size + if taskTemplate.GetTaskTypeVersion() == 0 { + state = state.SetOriginalArraySize(arrayJob.GetSize()) + arrayJobSize = arrayJob.GetSize() state = state.SetOriginalMinSuccesses(arrayJob.GetMinSuccesses()) // build input readers @@ -77,15 +77,15 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex // identify and validate the size of the array job size := -1 var literalCollection *idlCore.LiteralCollection - for _, literal := range inputs.Literals { + for _, literal := range inputs.GetLiterals() { if literalCollection = literal.GetCollection(); literalCollection != nil { // 
validate length of input list - if size != -1 && size != len(literalCollection.Literals) { + if size != -1 && size != len(literalCollection.GetLiterals()) { state = state.SetPhase(arrayCore.PhasePermanentFailure, 0).SetReason("all maptask input lists must be the same length") return state, nil } - size = len(literalCollection.Literals) + size = len(literalCollection.GetLiterals()) } } @@ -106,7 +106,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex arrayJobSize = int64(size) // build input readers - inputReaders = ConstructStaticInputReaders(tCtx.InputReader(), inputs.Literals, size) + inputReaders = ConstructStaticInputReaders(tCtx.InputReader(), inputs.GetLiterals(), size) } if arrayJobSize > maxArrayJobSize { @@ -117,10 +117,10 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex } // If the task is not discoverable, then skip data catalog work and move directly to launch - if taskTemplate.Metadata == nil || !taskTemplate.Metadata.Discoverable { + if taskTemplate.GetMetadata() == nil || !taskTemplate.GetMetadata().GetDiscoverable() { logger.Infof(ctx, "Task is not discoverable, moving to launch phase...") // Set an all set indexes to cache. This task won't try to write to catalog anyway. - state = state.SetIndexesToCache(arrayCore.InvertBitSet(bitarray.NewBitSet(uint(arrayJobSize)), uint(arrayJobSize))) + state = state.SetIndexesToCache(arrayCore.InvertBitSet(bitarray.NewBitSet(uint(arrayJobSize)), uint(arrayJobSize))) // #nosec G115 state = state.SetPhase(arrayCore.PhasePreLaunch, core.DefaultPhaseVersion).SetReason("Task is not discoverable.") state.SetExecutionArraySize(int(arrayJobSize)) @@ -165,7 +165,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex // TODO: maybe add a config option to decide the behavior on catalog failure. logger.Warnf(ctx, "Failing to lookup catalog. Will move on to launching the task. Error: %v", err) - state = state.SetIndexesToCache(arrayCore.InvertBitSet(bitarray.NewBitSet(uint(arrayJobSize)), uint(arrayJobSize))) + state = state.SetIndexesToCache(arrayCore.InvertBitSet(bitarray.NewBitSet(uint(arrayJobSize)), uint(arrayJobSize))) // #nosec G115 state = state.SetExecutionArraySize(int(arrayJobSize)) state = state.SetPhase(arrayCore.PhasePreLaunch, core.DefaultPhaseVersion).SetReason(fmt.Sprintf("Skipping cache check due to err [%v]", err)) return state, nil @@ -178,7 +178,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex } cachedResults := resp.GetCachedResults() - state = state.SetIndexesToCache(arrayCore.InvertBitSet(cachedResults, uint(arrayJobSize))) + state = state.SetIndexesToCache(arrayCore.InvertBitSet(cachedResults, uint(arrayJobSize))) // #nosec G115 state = state.SetExecutionArraySize(int(arrayJobSize) - resp.GetCachedCount()) // If all the sub-tasks are actually done, then we can just move on. @@ -223,14 +223,14 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state return state, externalResources, errors.Errorf(errors.BadTaskSpecification, "Required value not set, taskTemplate is nil") } - if tMeta := taskTemplate.Metadata; tMeta == nil || !tMeta.Discoverable { + if tMeta := taskTemplate.GetMetadata(); tMeta == nil || !tMeta.GetDiscoverable() { logger.Debugf(ctx, "Task is not marked as discoverable. 
Moving to [%v] phase.", phaseOnSuccess) return state.SetPhase(phaseOnSuccess, versionOnSuccess).SetReason("Task is not discoverable."), externalResources, nil } var inputReaders []io.InputReader arrayJobSize := int(state.GetOriginalArraySize()) - if taskTemplate.TaskTypeVersion == 0 { + if taskTemplate.GetTaskTypeVersion() == 0 { // input readers inputReaders, err = ConstructRemoteFileInputReaders(ctx, tCtx.DataStore(), tCtx.InputReader().GetInputPrefixPath(), arrayJobSize) if err != nil { @@ -242,7 +242,7 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state return state, externalResources, errors.Errorf(errors.MetadataAccessFailed, "Could not read inputs and therefore failed to determine array job size") } - inputReaders = ConstructStaticInputReaders(tCtx.InputReader(), inputs.Literals, arrayJobSize) + inputReaders = ConstructStaticInputReaders(tCtx.InputReader(), inputs.GetLiterals(), arrayJobSize) } // output reader @@ -251,8 +251,8 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state return nil, externalResources, err } - iface := *taskTemplate.Interface - iface.Outputs = makeSingularTaskInterface(iface.Outputs) + iface := taskTemplate.GetInterface() + iface.Outputs = makeSingularTaskInterface(iface.GetOutputs()) // Do not cache failed tasks. Retrieve the final phase from array status and unset the non-successful ones. @@ -262,14 +262,15 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state if !phase.IsSuccess() { // tasksToCache is built on the originalArraySize and ArrayStatus.Detailed is the executionArraySize originalIdx := arrayCore.CalculateOriginalIndex(idx, state.GetIndexesToCache()) - tasksToCache.Clear(uint(originalIdx)) + tasksToCache.Clear(uint(originalIdx)) // #nosec G115 } } // Create catalog put items, but only put the ones that were not originally cached (as read from the catalog results bitset) - catalogWriterItems, err := ConstructCatalogUploadRequests(*tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID().TaskId, - tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID(), taskTemplate.Metadata.DiscoveryVersion, - taskTemplate.Metadata.CacheIgnoreInputVars, iface, &tasksToCache, inputReaders, outputReaders) + //nolint:protogetter + catalogWriterItems, err := ConstructCatalogUploadRequests(tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID().TaskId, + tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID(), taskTemplate.GetMetadata().GetDiscoveryVersion(), + taskTemplate.GetMetadata().GetCacheIgnoreInputVars(), iface, &tasksToCache, inputReaders, outputReaders) if err != nil { return nil, externalResources, err @@ -292,6 +293,7 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state externalResources = make([]*core.ExternalResource, 0) for idx, phaseIdx := range state.ArrayStatus.Detailed.GetItems() { originalIdx := arrayCore.CalculateOriginalIndex(idx, state.GetIndexesToCache()) + // #nosec G115 if !tasksToCache.IsSet(uint(originalIdx)) { continue } @@ -299,8 +301,8 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state externalResources = append(externalResources, &core.ExternalResource{ CacheStatus: idlCore.CatalogCacheStatus_CACHE_POPULATED, - Index: uint32(originalIdx), - RetryAttempt: uint32(state.RetryAttempts.GetItem(idx)), + Index: uint32(originalIdx), // #nosec G115 + RetryAttempt: uint32(state.RetryAttempts.GetItem(idx)), // #nosec G115 Phase: core.Phases[phaseIdx], }, ) @@ -337,8 +339,8 @@ func 
WriteToCatalog(ctx context.Context, ownerSignal core.SignalAsync, catalogCl return false, nil } -func ConstructCatalogUploadRequests(keyID idlCore.Identifier, taskExecID idlCore.TaskExecutionIdentifier, - cacheVersion string, cacheIgnoreInputVars []string, taskInterface idlCore.TypedInterface, whichTasksToCache *bitarray.BitSet, +func ConstructCatalogUploadRequests(keyID *idlCore.Identifier, taskExecID idlCore.TaskExecutionIdentifier, + cacheVersion string, cacheIgnoreInputVars []string, taskInterface *idlCore.TypedInterface, whichTasksToCache *bitarray.BitSet, inputReaders []io.InputReader, outputReaders []io.OutputReader) ([]catalog.UploadRequest, error) { writerWorkItems := make([]catalog.UploadRequest, 0, len(inputReaders)) @@ -349,17 +351,18 @@ func ConstructCatalogUploadRequests(keyID idlCore.Identifier, taskExecID idlCore } for idx, input := range inputReaders { + // #nosec G115 if !whichTasksToCache.IsSet(uint(idx)) { continue } wi := catalog.UploadRequest{ Key: catalog.Key{ - Identifier: keyID, + Identifier: *keyID, InputReader: input, CacheVersion: cacheVersion, CacheIgnoreInputVars: cacheIgnoreInputVars, - TypedInterface: taskInterface, + TypedInterface: *taskInterface, }, ArtifactData: outputReaders[idx], ArtifactMetadata: catalog.Metadata{ @@ -400,6 +403,7 @@ func NewLiteralScalarOfInteger(number int64) *idlCore.Literal { func CatalogBitsetToLiteralCollection(catalogResults *bitarray.BitSet, size int) *idlCore.LiteralCollection { literals := make([]*idlCore.Literal, 0, size) for i := 0; i < size; i++ { + // #nosec G115 if !catalogResults.IsSet(uint(i)) { literals = append(literals, NewLiteralScalarOfInteger(int64(i))) } @@ -410,15 +414,15 @@ func CatalogBitsetToLiteralCollection(catalogResults *bitarray.BitSet, size int) } func makeSingularTaskInterface(varMap *idlCore.VariableMap) *idlCore.VariableMap { - if varMap == nil || len(varMap.Variables) == 0 { + if varMap == nil || len(varMap.GetVariables()) == 0 { return varMap } res := &idlCore.VariableMap{ - Variables: make(map[string]*idlCore.Variable, len(varMap.Variables)), + Variables: make(map[string]*idlCore.Variable, len(varMap.GetVariables())), } - for key, val := range varMap.Variables { + for key, val := range varMap.GetVariables() { if val.GetType().GetCollectionType() != nil { res.Variables[key] = &idlCore.Variable{Type: val.GetType().GetCollectionType()} } else { @@ -440,17 +444,17 @@ func ConstructCatalogReaderWorkItems(ctx context.Context, taskReader core.TaskRe workItems := make([]catalog.DownloadRequest, 0, len(inputs)) - iface := *t.Interface - iface.Outputs = makeSingularTaskInterface(iface.Outputs) + iface := t.GetInterface() + iface.Outputs = makeSingularTaskInterface(iface.GetOutputs()) for idx, inputReader := range inputs { // TODO: Check if Identifier or Interface are empty and return err item := catalog.DownloadRequest{ Key: catalog.Key{ - Identifier: *t.Id, - CacheVersion: t.GetMetadata().DiscoveryVersion, + Identifier: *t.Id, //nolint:protogetter + CacheVersion: t.GetMetadata().GetDiscoveryVersion(), InputReader: inputReader, - TypedInterface: iface, + TypedInterface: *iface, }, Target: outputs[idx], } @@ -471,7 +475,7 @@ func ConstructStaticInputReaders(inputPaths io.InputFilePaths, inputLiterals map for inputName, inputLiteral := range inputLiterals { if literalCollection = inputLiteral.GetCollection(); literalCollection != nil { // if literal is a collection then we need to retrieve the specific literal for this subtask index - literals[inputName] = literalCollection.Literals[i] + literals[inputName] 
= literalCollection.GetLiterals()[i] } else { literals[inputName] = inputLiteral } diff --git a/flyteplugins/go/tasks/plugins/array/catalog_test.go b/flyteplugins/go/tasks/plugins/array/catalog_test.go index 15a36a4dcf..296d2283d4 100644 --- a/flyteplugins/go/tasks/plugins/array/catalog_test.go +++ b/flyteplugins/go/tasks/plugins/array/catalog_test.go @@ -102,19 +102,19 @@ var ( func TestNewLiteralScalarOfInteger(t *testing.T) { l := NewLiteralScalarOfInteger(int64(65)) - assert.Equal(t, int64(65), l.Value.(*core.Literal_Scalar).Scalar.Value.(*core.Scalar_Primitive). - Primitive.Value.(*core.Primitive_Integer).Integer) + assert.Equal(t, int64(65), l.GetValue().(*core.Literal_Scalar).Scalar.GetValue().(*core.Scalar_Primitive). + Primitive.GetValue().(*core.Primitive_Integer).Integer) } func TestCatalogBitsetToLiteralCollection(t *testing.T) { ba := bitarray.NewBitSet(3) ba.Set(1) lc := CatalogBitsetToLiteralCollection(ba, 3) - assert.Equal(t, 2, len(lc.Literals)) - assert.Equal(t, int64(0), lc.Literals[0].Value.(*core.Literal_Scalar).Scalar.Value.(*core.Scalar_Primitive). - Primitive.Value.(*core.Primitive_Integer).Integer) - assert.Equal(t, int64(2), lc.Literals[1].Value.(*core.Literal_Scalar).Scalar.Value.(*core.Scalar_Primitive). - Primitive.Value.(*core.Primitive_Integer).Integer) + assert.Equal(t, 2, len(lc.GetLiterals())) + assert.Equal(t, int64(0), lc.GetLiterals()[0].GetValue().(*core.Literal_Scalar).Scalar.GetValue().(*core.Scalar_Primitive). + Primitive.GetValue().(*core.Primitive_Integer).Integer) + assert.Equal(t, int64(2), lc.GetLiterals()[1].GetValue().(*core.Literal_Scalar).Scalar.GetValue().(*core.Scalar_Primitive). + Primitive.GetValue().(*core.Primitive_Integer).Integer) } func runDetermineDiscoverabilityTest(t testing.TB, taskTemplate *core.TaskTemplate, future catalog.DownloadFuture, diff --git a/flyteplugins/go/tasks/plugins/array/core/metadata.go b/flyteplugins/go/tasks/plugins/array/core/metadata.go index 4ac7c71b4c..fcbaa3456d 100644 --- a/flyteplugins/go/tasks/plugins/array/core/metadata.go +++ b/flyteplugins/go/tasks/plugins/array/core/metadata.go @@ -29,10 +29,11 @@ func InitializeExternalResources(ctx context.Context, tCtx core.TaskExecutionCon var childIndex int var phase core.Phase + // #nosec G115 if state.IndexesToCache.IsSet(uint(i)) { // if not cached set to PhaseUndefined and set cacheStatus according to Discoverable phase = core.PhaseUndefined - if taskTemplate.Metadata == nil || !taskTemplate.Metadata.Discoverable { + if taskTemplate.GetMetadata() == nil || !taskTemplate.GetMetadata().GetDiscoverable() { cacheStatus = idlCore.CatalogCacheStatus_CACHE_DISABLED } else { cacheStatus = idlCore.CatalogCacheStatus_CACHE_MISS @@ -54,7 +55,7 @@ func InitializeExternalResources(ctx context.Context, tCtx core.TaskExecutionCon externalResources[i] = &core.ExternalResource{ ExternalID: subTaskID, CacheStatus: cacheStatus, - Index: uint32(i), + Index: uint32(i), // #nosec G115 Logs: nil, RetryAttempt: 0, Phase: phase, diff --git a/flyteplugins/go/tasks/plugins/array/core/metadata_test.go b/flyteplugins/go/tasks/plugins/array/core/metadata_test.go index 262bd3b822..370af258c7 100644 --- a/flyteplugins/go/tasks/plugins/array/core/metadata_test.go +++ b/flyteplugins/go/tasks/plugins/array/core/metadata_test.go @@ -17,9 +17,9 @@ func TestInitializeExternalResources(t *testing.T) { subTaskCount := 10 cachedCount := 4 - indexesToCache := InvertBitSet(bitarray.NewBitSet(uint(subTaskCount)), uint(subTaskCount)) + indexesToCache := 
InvertBitSet(bitarray.NewBitSet(uint(subTaskCount)), uint(subTaskCount)) // #nosec G115 for i := 0; i < cachedCount; i++ { - indexesToCache.Clear(uint(i)) + indexesToCache.Clear(uint(i)) // #nosec G115 } tr := &mocks.TaskReader{} @@ -54,7 +54,7 @@ func TestInitializeExternalResources(t *testing.T) { assert.Nil(t, err) assert.Equal(t, subTaskCount, len(externalResources)) for i, externalResource := range externalResources { - assert.Equal(t, uint32(i), externalResource.Index) + assert.Equal(t, uint32(i), externalResource.Index) // #nosec G115 assert.Equal(t, 0, len(externalResource.Logs)) assert.Equal(t, uint32(0), externalResource.RetryAttempt) if i < cachedCount { diff --git a/flyteplugins/go/tasks/plugins/array/core/state.go b/flyteplugins/go/tasks/plugins/array/core/state.go index a540359b0a..8fcc85946b 100644 --- a/flyteplugins/go/tasks/plugins/array/core/state.go +++ b/flyteplugins/go/tasks/plugins/array/core/state.go @@ -303,7 +303,7 @@ func InvertBitSet(input *bitarray.BitSet, limit uint) *bitarray.BitSet { func NewPhasesCompactArray(count uint) bitarray.CompactArray { // TODO: This is fragile, we should introduce a TaskPhaseCount as the last element in the enum - a, err := bitarray.NewCompactArray(count, bitarray.Item(len(core.Phases)-1)) + a, err := bitarray.NewCompactArray(count, bitarray.Item(len(core.Phases)-1)) // #nosec G115 if err != nil { logger.Warnf(context.Background(), "Failed to create compact array with provided parameters [count: %v]", count) @@ -322,7 +322,7 @@ func CalculateOriginalIndex(childIdx int, toCache *bitarray.BitSet) int { } if childIdx+1 == sum { - return int(i) + return int(i) // #nosec G115 } } diff --git a/flyteplugins/go/tasks/plugins/array/core/state_test.go b/flyteplugins/go/tasks/plugins/array/core/state_test.go index 969c98df20..84ac17d315 100644 --- a/flyteplugins/go/tasks/plugins/array/core/state_test.go +++ b/flyteplugins/go/tasks/plugins/array/core/state_test.go @@ -27,7 +27,7 @@ func TestInvertBitSet(t *testing.T) { assertBitSetsEqual(t, expected, actual, 4) } -func assertBitSetsEqual(t testing.TB, b1, b2 *bitarray.BitSet, len int) { +func assertBitSetsEqual(t testing.TB, b1, b2 *bitarray.BitSet, len uint) { if b1 == nil { assert.Nil(t, b2) } else if b2 == nil { @@ -35,7 +35,7 @@ func assertBitSetsEqual(t testing.TB, b1, b2 *bitarray.BitSet, len int) { } assert.Equal(t, b1.Cap(), b2.Cap()) - for i := uint(0); i < uint(len); i++ { + for i := uint(0); i < len; i++ { assert.Equal(t, b1.IsSet(i), b2.IsSet(i), "At index %v", i) } } @@ -43,11 +43,11 @@ func assertBitSetsEqual(t testing.TB, b1, b2 *bitarray.BitSet, len int) { func TestMapArrayStateToPluginPhase(t *testing.T) { ctx := context.Background() - subTaskCount := 3 + subTaskCount := uint(3) - detailedArray := NewPhasesCompactArray(uint(subTaskCount)) - indexesToCache := InvertBitSet(bitarray.NewBitSet(uint(subTaskCount)), uint(subTaskCount)) - retryAttemptsArray, err := bitarray.NewCompactArray(uint(subTaskCount), bitarray.Item(1)) + detailedArray := NewPhasesCompactArray(subTaskCount) + indexesToCache := InvertBitSet(bitarray.NewBitSet(subTaskCount), subTaskCount) + retryAttemptsArray, err := bitarray.NewCompactArray(subTaskCount, bitarray.Item(1)) assert.NoError(t, err) t.Run("start", func(t *testing.T) { diff --git a/flyteplugins/go/tasks/plugins/array/inputs.go b/flyteplugins/go/tasks/plugins/array/inputs.go index e0a7035181..8e4e746530 100644 --- a/flyteplugins/go/tasks/plugins/array/inputs.go +++ b/flyteplugins/go/tasks/plugins/array/inputs.go @@ -20,7 +20,7 @@ func (i 
arrayJobInputReader) GetInputPath() storage.DataReference { } func GetInputReader(tCtx core.TaskExecutionContext, taskTemplate *idlCore.TaskTemplate) io.InputReader { - if taskTemplate.GetTaskTypeVersion() == 0 && taskTemplate.Type != AwsBatchTaskType { + if taskTemplate.GetTaskTypeVersion() == 0 && taskTemplate.GetType() != AwsBatchTaskType { // Prior to task type version == 1, dynamic type tasks (including array tasks) would write input files for each // individual array task instance. In this case we use a modified input reader to only pass in the parent input // directory. diff --git a/flyteplugins/go/tasks/plugins/array/k8s/management.go b/flyteplugins/go/tasks/plugins/array/k8s/management.go index 12eea118cc..e64c3e601a 100644 --- a/flyteplugins/go/tasks/plugins/array/k8s/management.go +++ b/flyteplugins/go/tasks/plugins/array/k8s/management.go @@ -69,7 +69,7 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon messageCollector := errorcollector.NewErrorMessageCollector() newArrayStatus := &arraystatus.ArrayStatus{ Summary: arraystatus.ArraySummary{}, - Detailed: arrayCore.NewPhasesCompactArray(uint(currentState.GetExecutionArraySize())), + Detailed: arrayCore.NewPhasesCompactArray(uint(currentState.GetExecutionArraySize())), // #nosec G115 } externalResources = make([]*core.ExternalResource, 0, len(currentState.GetArrayStatus().Detailed.GetItems())) @@ -82,7 +82,7 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon // If the current State is newly minted then we must initialize RetryAttempts to track how many // times each subtask is executed. if len(currentState.RetryAttempts.GetItems()) == 0 { - count := uint(currentState.GetExecutionArraySize()) + count := uint(currentState.GetExecutionArraySize()) // #nosec G115 maxValue := bitarray.Item(tCtx.TaskExecutionMetadata().GetMaxAttempts()) retryAttemptsArray, err := bitarray.NewCompactArray(count, maxValue) @@ -104,7 +104,7 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon // times the subtask failed due to system issues, this is necessary to correctly evaluate // interruptible subtasks. 
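Note: RetryAttempts and SystemFailures below are both sized with uint(currentState.GetExecutionArraySize()), annotated with // #nosec G115. An alternative to suppressing the rule is to validate before converting; the helper below is a hypothetical sketch of that style and is not introduced by this change.

package main

import "fmt"

// toUint is a hypothetical guarded conversion: rather than silencing G115,
// it rejects negative sizes, the only int values that cannot be represented
// as a uint.
func toUint(n int) (uint, error) {
	if n < 0 {
		return 0, fmt.Errorf("expected a non-negative size, got %d", n)
	}
	return uint(n), nil
}

func main() {
	if size, err := toUint(10); err == nil {
		fmt.Println("sizing compact array for", size, "subtasks")
	}
	if _, err := toUint(-3); err != nil {
		fmt.Println("rejected:", err)
	}
}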
if len(currentState.SystemFailures.GetItems()) == 0 { - count := uint(currentState.GetExecutionArraySize()) + count := uint(currentState.GetExecutionArraySize()) // #nosec G115 maxValue := bitarray.Item(tCtx.TaskExecutionMetadata().GetMaxAttempts()) systemFailuresArray, err := bitarray.NewCompactArray(count, maxValue) @@ -134,13 +134,13 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon return currentState, externalResources, errors.Errorf(errors.BadTaskSpecification, "Required value not set, taskTemplate is nil") } - arrayJob, err := arrayCore.ToArrayJob(taskTemplate.GetCustom(), taskTemplate.TaskTypeVersion) + arrayJob, err := arrayCore.ToArrayJob(taskTemplate.GetCustom(), taskTemplate.GetTaskTypeVersion()) if err != nil { return currentState, externalResources, err } currentParallelism := 0 - maxParallelism := int(arrayJob.Parallelism) + maxParallelism := int(arrayJob.GetParallelism()) currentSubTaskPhaseHash, err := currentState.GetArrayStatus().HashCode() if err != nil { @@ -155,7 +155,7 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon retryAttempt++ newState.RetryAttempts.SetItem(childIdx, retryAttempt) } else if existingPhase.IsTerminal() { - newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(existingPhase)) + newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(existingPhase)) // #nosec G115 continue } @@ -246,12 +246,13 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon } } + // #nosec G115 if actualPhase == core.PhaseRetryableFailure && uint32(retryAttempt+1) >= stCtx.TaskExecutionMetadata().GetMaxAttempts() { // If we see a retryable failure we must check if the number of retries exceeds the maximum // attempts. If so, transition to a permanent failure so that is not attempted again. 
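Note: restated outside the plugin, the promotion rule described in the comment above (and applied just below) behaves as follows; this is an illustration only, not the plugin's code.

package main

import "fmt"

// exhaustedRetries mirrors the condition used in the diff: once the next attempt
// number (retryAttempt+1) reaches maxAttempts, a retryable failure is treated as
// permanent. The uint32 conversion is the one the plugin marks with #nosec G115.
func exhaustedRetries(retryAttempt uint64, maxAttempts uint32) bool {
	return uint32(retryAttempt+1) >= maxAttempts
}

func main() {
	const maxAttempts = uint32(3)
	for attempt := uint64(0); attempt < 4; attempt++ {
		fmt.Printf("attempt %d failed, permanent: %v\n", attempt, exhaustedRetries(attempt, maxAttempts))
	}
	// With maxAttempts = 3, attempts 0 and 1 remain retryable; 2 and 3 are permanent.
}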
actualPhase = core.PhasePermanentFailure } - newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(actualPhase)) + newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(actualPhase)) // #nosec G115 if actualPhase.IsTerminal() { err = deallocateResource(ctx, stCtx, config, podName) @@ -275,9 +276,9 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon externalResources = append(externalResources, &core.ExternalResource{ ExternalID: podName, - Index: uint32(originalIdx), + Index: uint32(originalIdx), // #nosec G115 Logs: logLinks, - RetryAttempt: uint32(retryAttempt), + RetryAttempt: uint32(retryAttempt), // #nosec G115 Phase: actualPhase, }) @@ -383,15 +384,15 @@ func TerminateSubTasks(ctx context.Context, tCtx core.TaskExecutionContext, kube } else { externalResources = append(externalResources, &core.ExternalResource{ ExternalID: stCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(), - Index: uint32(originalIdx), - RetryAttempt: uint32(retryAttempt), + Index: uint32(originalIdx), // #nosec G115 + RetryAttempt: uint32(retryAttempt), // #nosec G115 Phase: core.PhaseAborted, }) } } if messageCollector.Length() > 0 { - return currentState, externalResources, fmt.Errorf(messageCollector.Summary(config.MaxErrorStringLength)) + return currentState, externalResources, fmt.Errorf(messageCollector.Summary(config.MaxErrorStringLength)) //nolint } return currentState.SetPhase(arrayCore.PhaseWriteToDiscoveryThenFail, currentState.PhaseVersion+1), externalResources, nil diff --git a/flyteplugins/go/tasks/plugins/array/k8s/management_test.go b/flyteplugins/go/tasks/plugins/array/k8s/management_test.go index 7100fbc34c..d1628f98a2 100644 --- a/flyteplugins/go/tasks/plugins/array/k8s/management_test.go +++ b/flyteplugins/go/tasks/plugins/array/k8s/management_test.go @@ -217,8 +217,10 @@ func TestCheckSubTasksState(t *testing.T) { OriginalArraySize: int64(subtaskCount), OriginalMinSuccesses: int64(subtaskCount), ArrayStatus: arraystatus.ArrayStatus{ + // #nosec G115 Detailed: arrayCore.NewPhasesCompactArray(uint(subtaskCount)), // set all tasks to core.PhaseUndefined }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached } @@ -254,8 +256,10 @@ func TestCheckSubTasksState(t *testing.T) { OriginalArraySize: int64(subtaskCount), OriginalMinSuccesses: int64(subtaskCount), ArrayStatus: arraystatus.ArrayStatus{ + // #nosec G115 Detailed: arrayCore.NewPhasesCompactArray(uint(subtaskCount)), // set all tasks to core.PhaseUndefined }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached } @@ -296,8 +300,10 @@ func TestCheckSubTasksState(t *testing.T) { OriginalArraySize: int64(subtaskCount), OriginalMinSuccesses: int64(subtaskCount), ArrayStatus: arraystatus.ArrayStatus{ + // #nosec G115 Detailed: arrayCore.NewPhasesCompactArray(uint(subtaskCount)), // set all tasks to core.PhaseUndefined }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached } @@ -342,12 +348,12 @@ func TestCheckSubTasksState(t *testing.T) { tCtx := getMockTaskExecutionContext(ctx, 0) tCtx.OnResourceManager().Return(&resourceManager) - detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) + detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) // #nosec G115 for i := 0; i < subtaskCount; i++ { 
detailed.SetItem(i, bitarray.Item(core.PhaseRetryableFailure)) // set all tasks to core.PhaseRetryableFailure } - retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1)) + retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1)) // #nosec G115 assert.NoError(t, err) currentState := &arrayCore.State{ @@ -358,6 +364,7 @@ func TestCheckSubTasksState(t *testing.T) { ArrayStatus: arraystatus.ArrayStatus{ Detailed: detailed, }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached RetryAttempts: retryAttemptsArray, } @@ -411,8 +418,9 @@ func TestCheckSubTasksState(t *testing.T) { tCtx := getMockTaskExecutionContext(ctx, 0) tCtx.OnResourceManager().Return(&resourceManager) - detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) + detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) // #nosec G115 for i := 0; i < subtaskCount; i++ { + // #nosec G115 detailed.SetItem(i, bitarray.Item(core.PhaseRunning)) // set all tasks to core.PhaseRunning } @@ -424,6 +432,7 @@ func TestCheckSubTasksState(t *testing.T) { ArrayStatus: arraystatus.ArrayStatus{ Detailed: detailed, }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached } @@ -445,10 +454,10 @@ func TestCheckSubTasksState(t *testing.T) { logLinks := externalResource.Logs assert.Equal(t, 2, len(logLinks)) - assert.Equal(t, fmt.Sprintf("Kubernetes Logs #0-%d", i), logLinks[0].Name) - assert.Equal(t, fmt.Sprintf("k8s/log/a-n-b/notfound-%d/pod?namespace=a-n-b", i), logLinks[0].Uri) - assert.Equal(t, fmt.Sprintf("Cloudwatch Logs #0-%d", i), logLinks[1].Name) - assert.Equal(t, fmt.Sprintf("https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.notfound-%d;streamFilter=typeLogStreamPrefix", i), logLinks[1].Uri) + assert.Equal(t, fmt.Sprintf("Kubernetes Logs #0-%d", i), logLinks[0].GetName()) + assert.Equal(t, fmt.Sprintf("k8s/log/a-n-b/notfound-%d/pod?namespace=a-n-b", i), logLinks[0].GetUri()) + assert.Equal(t, fmt.Sprintf("Cloudwatch Logs #0-%d", i), logLinks[1].GetName()) + assert.Equal(t, fmt.Sprintf("https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.notfound-%d;streamFilter=typeLogStreamPrefix", i), logLinks[1].GetUri()) } }) @@ -464,12 +473,13 @@ func TestCheckSubTasksState(t *testing.T) { tCtx := getMockTaskExecutionContext(ctx, 0) tCtx.OnResourceManager().Return(&resourceManager) - detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) + detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) // #nosec G115 for i := 0; i < subtaskCount; i++ { + // #nosec G115 detailed.SetItem(i, bitarray.Item(core.PhaseRunning)) // set all tasks to core.PhaseRunning } - retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1)) + retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1)) // #nosec G115 assert.NoError(t, err) currentState := &arrayCore.State{ @@ -480,6 +490,7 @@ func TestCheckSubTasksState(t *testing.T) { ArrayStatus: arraystatus.ArrayStatus{ Detailed: detailed, }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached RetryAttempts: retryAttemptsArray, } @@ -509,11 +520,13 
@@ func TestCheckSubTasksState(t *testing.T) { tCtx := getMockTaskExecutionContext(ctx, 0) tCtx.OnResourceManager().Return(&resourceManager) + // #nosec G115 detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) for i := 0; i < subtaskCount; i++ { detailed.SetItem(i, bitarray.Item(core.PhaseRunning)) // set all tasks to core.PhaseRunning } + // #nosec G115 retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1)) assert.NoError(t, err) @@ -529,6 +542,7 @@ func TestCheckSubTasksState(t *testing.T) { ArrayStatus: arraystatus.ArrayStatus{ Detailed: detailed, }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached RetryAttempts: retryAttemptsArray, } @@ -561,7 +575,7 @@ func TestTerminateSubTasksOnAbort(t *testing.T) { kubeClient.OnGetClient().Return(mocks.NewFakeKubeClient()) kubeClient.OnGetCache().Return(mocks.NewFakeKubeCache()) - compactArray := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) + compactArray := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) // #nosec G115 for i := 0; i < subtaskCount; i++ { compactArray.SetItem(i, 5) } @@ -574,6 +588,7 @@ func TestTerminateSubTasksOnAbort(t *testing.T) { ArrayStatus: arraystatus.ArrayStatus{ Detailed: compactArray, }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), } @@ -652,9 +667,10 @@ func TestTerminateSubTasks(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // #nosec G115 compactArray := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) for i, phaseIdx := range test.initialPhaseIndices { - compactArray.SetItem(i, bitarray.Item(phaseIdx)) + compactArray.SetItem(i, bitarray.Item(phaseIdx)) // #nosec G115 } currentState := &arrayCore.State{ CurrentPhase: arrayCore.PhaseCheckingSubTaskExecutions, @@ -665,6 +681,7 @@ func TestTerminateSubTasks(t *testing.T) { ArrayStatus: arraystatus.ArrayStatus{ Detailed: compactArray, }, + // #nosec G115 IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), } diff --git a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go index b76fe70d28..d0e483257d 100644 --- a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go +++ b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go @@ -192,7 +192,7 @@ func (s SubTaskExecutionID) TemplateVarsByScheme() []tasklog.TemplateVar { {Regex: LogTemplateRegexes.ParentName, Value: s.parentName}, { Regex: LogTemplateRegexes.ExecutionIndex, - Value: strconv.FormatUint(uint64(s.executionIndex), 10), + Value: strconv.FormatUint(uint64(s.executionIndex), 10), // #nosec G115 }, { Regex: LogTemplateRegexes.RetryAttempt, @@ -212,7 +212,7 @@ func NewSubTaskExecutionID(taskExecutionID pluginsCore.TaskExecutionID, executio executionIndex, taskExecutionID.GetGeneratedName(), retryAttempt, - taskExecutionID.GetID().RetryAttempt, + taskExecutionID.GetID().RetryAttempt, //nolint:protogetter } } @@ -252,8 +252,8 @@ func NewSubTaskExecutionMetadata(taskExecutionMetadata pluginsCore.TaskExecution var err error secretsMap := make(map[string]string) injectSecretsLabel := make(map[string]string) - if taskTemplate.SecurityContext != nil && len(taskTemplate.SecurityContext.Secrets) > 0 { - secretsMap, err = secrets.MarshalSecretsToMapStrings(taskTemplate.SecurityContext.Secrets) + if 
taskTemplate.GetSecurityContext() != nil && len(taskTemplate.GetSecurityContext().GetSecrets()) > 0 { + secretsMap, err = secrets.MarshalSecretsToMapStrings(taskTemplate.GetSecurityContext().GetSecrets()) if err != nil { return SubTaskExecutionMetadata{}, err } @@ -264,6 +264,7 @@ func NewSubTaskExecutionMetadata(taskExecutionMetadata pluginsCore.TaskExecution } subTaskExecutionID := NewSubTaskExecutionID(taskExecutionMetadata.GetTaskExecutionID(), executionIndex, retryAttempt) + // #nosec G115 interruptible := taskExecutionMetadata.IsInterruptible() && int32(systemFailures) < taskExecutionMetadata.GetInterruptibleFailureThreshold() return SubTaskExecutionMetadata{ taskExecutionMetadata, diff --git a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go index a7f5aa20b4..83aead4f5e 100644 --- a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go +++ b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go @@ -31,8 +31,8 @@ func TestSubTaskExecutionContext(t *testing.T) { subtaskTemplate, err := stCtx.TaskReader().Read(ctx) assert.Nil(t, err) - assert.Equal(t, int32(2), subtaskTemplate.TaskTypeVersion) - assert.Equal(t, podPlugin.ContainerTaskType, subtaskTemplate.Type) + assert.Equal(t, int32(2), subtaskTemplate.GetTaskTypeVersion()) + assert.Equal(t, podPlugin.ContainerTaskType, subtaskTemplate.GetType()) assert.Equal(t, storage.DataReference("/prefix/"), stCtx.OutputWriter().GetOutputPrefixPath()) assert.Equal(t, storage.DataReference("/raw_prefix/5/1"), stCtx.OutputWriter().GetRawOutputPrefix()) assert.Equal(t, diff --git a/flyteplugins/go/tasks/plugins/array/outputs.go b/flyteplugins/go/tasks/plugins/array/outputs.go index cb07fb0de1..611442de98 100644 --- a/flyteplugins/go/tasks/plugins/array/outputs.go +++ b/flyteplugins/go/tasks/plugins/array/outputs.go @@ -52,6 +52,7 @@ type assembleOutputsWorker struct { func (w assembleOutputsWorker) Process(ctx context.Context, workItem workqueue.WorkItem) (workqueue.WorkStatus, error) { i := workItem.(*outputAssembleItem) + // #nosec G115 outputReaders, err := ConstructOutputReaders(ctx, i.dataStore, i.outputPaths.GetOutputPrefixPath(), i.outputPaths.GetRawOutputPrefix(), int(i.finalPhases.ItemsCount)) if err != nil { logger.Warnf(ctx, "Failed to construct output readers. 
Error: %v", err) @@ -89,7 +90,7 @@ func (w assembleOutputsWorker) Process(ctx context.Context, workItem workqueue.W // to aggregate outputs here finalOutputs.Literals = output.GetLiterals() } else { - appendSubTaskOutput(finalOutputs, output, int64(i.finalPhases.ItemsCount)) + appendSubTaskOutput(finalOutputs, output, int64(i.finalPhases.ItemsCount)) // #nosec G115 continue } } @@ -110,7 +111,7 @@ func (w assembleOutputsWorker) Process(ctx context.Context, workItem workqueue.W } func appendOneItem(outputs *core.LiteralMap, varName string, literal *core.Literal, expectedSize int64) { - existingVal, found := outputs.Literals[varName] + existingVal, found := outputs.GetLiterals()[varName] var list *core.LiteralCollection if found { list = existingVal.GetCollection() @@ -155,7 +156,7 @@ func buildFinalPhases(executedTasks bitarray.CompactArray, indexes *bitarray.Bit // Set phases os already discovered tasks to success for i := uint(0); i < totalSize; i++ { if !indexes.IsSet(i) { - res.SetItem(int(i), bitarray.Item(pluginCore.PhaseSuccess)) + res.SetItem(int(i), bitarray.Item(pluginCore.PhaseSuccess)) // #nosec G115 } } @@ -199,14 +200,14 @@ func AssembleFinalOutputs(ctx context.Context, assemblyQueue OutputAssembler, tC } finalPhases := buildFinalPhases(state.GetArrayStatus().Detailed, - state.GetIndexesToCache(), uint(state.GetOriginalArraySize())) + state.GetIndexesToCache(), uint(state.GetOriginalArraySize())) // #nosec G115 err = assemblyQueue.Queue(ctx, workItemID, &outputAssembleItem{ varNames: varNames, finalPhases: finalPhases, outputPaths: tCtx.OutputWriter(), dataStore: tCtx.DataStore(), - isAwsSingleJob: taskTemplate.Type == AwsBatchTaskType, + isAwsSingleJob: taskTemplate.GetType() == AwsBatchTaskType, }) if err != nil { @@ -274,6 +275,7 @@ type assembleErrorsWorker struct { func (a assembleErrorsWorker) Process(ctx context.Context, workItem workqueue.WorkItem) (workqueue.WorkStatus, error) { w := workItem.(*outputAssembleItem) + // #nosec G115 outputReaders, err := ConstructOutputReaders(ctx, w.dataStore, w.outputPaths.GetOutputPrefixPath(), w.outputPaths.GetRawOutputPrefix(), int(w.finalPhases.ItemsCount)) if err != nil { return workqueue.WorkStatusNotDone, err diff --git a/flyteplugins/go/tasks/plugins/hive/execution_state.go b/flyteplugins/go/tasks/plugins/hive/execution_state.go index 16ac3835bd..b1d971d0d3 100644 --- a/flyteplugins/go/tasks/plugins/hive/execution_state.go +++ b/flyteplugins/go/tasks/plugins/hive/execution_state.go @@ -116,7 +116,7 @@ func MapExecutionStateToPhaseInfo(state ExecutionState, _ client.QuboleClient) c if state.CreationFailureCount > 5 { phaseInfo = core.PhaseInfoSystemRetryableFailure("QuboleFailure", "Too many creation attempts", nil) } else { - phaseInfo = core.PhaseInfoQueued(t, uint32(state.CreationFailureCount), "Waiting for Qubole launch") + phaseInfo = core.PhaseInfoQueued(t, uint32(state.CreationFailureCount), "Waiting for Qubole launch") // #nosec G115 } case PhaseSubmitted: phaseInfo = core.PhaseInfoRunning(core.DefaultPhaseVersion, ConstructTaskInfo(state)) @@ -240,7 +240,7 @@ func GetAllocationToken(ctx context.Context, tCtx core.TaskExecutionContext, cur } func validateQuboleHiveJob(hiveJob plugins.QuboleHiveJob) error { - if hiveJob.Query == nil { + if hiveJob.GetQuery() == nil { return errors.Errorf(errors.BadTaskSpecification, "Query could not be found. 
Please ensure that you are at least on Flytekit version 0.3.0 or later.") } @@ -267,7 +267,7 @@ func GetQueryInfo(ctx context.Context, tCtx core.TaskExecutionContext) ( return "", "", []string{}, 0, "", err } - query := hiveJob.Query.GetQuery() + query := hiveJob.GetQuery().GetQuery() outputs, err := template.Render(ctx, []string{query}, template.Parameters{ @@ -281,10 +281,10 @@ func GetQueryInfo(ctx context.Context, tCtx core.TaskExecutionContext) ( } formattedQuery = outputs[0] - cluster = hiveJob.ClusterLabel - timeoutSec = hiveJob.Query.TimeoutSec - taskName = taskTemplate.Id.Name - tags = hiveJob.Tags + cluster = hiveJob.GetClusterLabel() + timeoutSec = hiveJob.GetQuery().GetTimeoutSec() + taskName = taskTemplate.GetId().GetName() + tags = hiveJob.GetTags() tags = append(tags, fmt.Sprintf("ns:%s", tCtx.TaskExecutionMetadata().GetNamespace())) for k, v := range tCtx.TaskExecutionMetadata().GetLabels() { tags = append(tags, fmt.Sprintf("%s:%s", k, v)) @@ -326,8 +326,8 @@ func mapLabelToPrimaryLabel(ctx context.Context, quboleCfg *config.Config, label func mapProjectDomainToDestinationClusterLabel(ctx context.Context, tCtx core.TaskExecutionContext, quboleCfg *config.Config) (string, bool) { tExecID := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID() - project := tExecID.NodeExecutionId.GetExecutionId().GetProject() - domain := tExecID.NodeExecutionId.GetExecutionId().GetDomain() + project := tExecID.GetNodeExecutionId().GetExecutionId().GetProject() + domain := tExecID.GetNodeExecutionId().GetExecutionId().GetDomain() logger.Debugf(ctx, "No clusterLabelOverride. Finding the pre-defined cluster label for (project: %v, domain: %v)", project, domain) // Using a linear search because N is small for _, m := range quboleCfg.DestinationClusterConfigs { @@ -504,7 +504,7 @@ func WriteOutputs(ctx context.Context, tCtx core.TaskExecutionContext, currentSt } externalLocation := tCtx.OutputWriter().GetRawOutputPrefix() - outputs := taskTemplate.Interface.Outputs.GetVariables() + outputs := taskTemplate.GetInterface().GetOutputs().GetVariables() if len(outputs) != 0 && len(outputs) != 1 { return currentState, errors.Errorf(errors.BadTaskSpecification, "Hive tasks must have zero or one output: [%d] found", len(outputs)) } diff --git a/flyteplugins/go/tasks/plugins/hive/execution_state_test.go b/flyteplugins/go/tasks/plugins/hive/execution_state_test.go index 4e34a04593..d67a53bb10 100644 --- a/flyteplugins/go/tasks/plugins/hive/execution_state_test.go +++ b/flyteplugins/go/tasks/plugins/hive/execution_state_test.go @@ -101,7 +101,7 @@ func TestConstructTaskLog(t *testing.T) { u, err := url.Parse(expected) assert.NoError(t, err) taskLog := ConstructTaskLog(ExecutionState{CommandID: "123", URI: u.String()}) - assert.Equal(t, expected, taskLog.Uri) + assert.Equal(t, expected, taskLog.GetUri()) } func TestConstructTaskInfo(t *testing.T) { @@ -120,7 +120,7 @@ func TestConstructTaskInfo(t *testing.T) { } taskInfo := ConstructTaskInfo(e) - assert.Equal(t, "https://wellness.qubole.com/v2/analyze?command_id=123", taskInfo.Logs[0].Uri) + assert.Equal(t, "https://wellness.qubole.com/v2/analyze?command_id=123", taskInfo.Logs[0].GetUri()) assert.Len(t, taskInfo.ExternalResources, 1) assert.Equal(t, taskInfo.ExternalResources[0].ExternalID, "123") } @@ -358,7 +358,7 @@ func TestWriteOutputs(t *testing.T) { literals, err1, err2 := reader.Read(context.Background()) assert.Nil(t, err1) assert.NoError(t, err2) - assert.NotNil(t, literals.Literals["results"].GetScalar().GetSchema()) + assert.NotNil(t, 
literals.GetLiterals()["results"].GetScalar().GetSchema()) }) state := ExecutionState{} diff --git a/flyteplugins/go/tasks/plugins/hive/executions_cache.go b/flyteplugins/go/tasks/plugins/hive/executions_cache.go index 40885ab093..6ce2fcf6e2 100644 --- a/flyteplugins/go/tasks/plugins/hive/executions_cache.go +++ b/flyteplugins/go/tasks/plugins/hive/executions_cache.go @@ -39,7 +39,8 @@ func NewQuboleHiveExecutionsCache(ctx context.Context, quboleClient client.Qubol scope: scope, cfg: cfg, } - autoRefreshCache, err := cache.NewAutoRefreshCache("qubole", q.SyncQuboleQuery, workqueue.DefaultControllerRateLimiter(), ResyncDuration, cfg.Workers, cfg.LruCacheSize, scope) + // #nosec G115 + autoRefreshCache, err := cache.NewAutoRefreshCache("qubole", q.SyncQuboleQuery, workqueue.DefaultControllerRateLimiter(), ResyncDuration, uint(cfg.Workers), uint(cfg.LruCacheSize), scope) if err != nil { logger.Errorf(ctx, "Could not create AutoRefreshCache in QuboleHiveExecutor. [%s]", err) return q, errors.Wrapf(errors.CacheFailed, err, "Error creating AutoRefreshCache") diff --git a/flyteplugins/go/tasks/plugins/k8s/dask/dask.go b/flyteplugins/go/tasks/plugins/k8s/dask/dask.go index ae68a4c760..df0e4f3472 100644 --- a/flyteplugins/go/tasks/plugins/k8s/dask/dask.go +++ b/flyteplugins/go/tasks/plugins/k8s/dask/dask.go @@ -20,7 +20,7 @@ import ( "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/k8s" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/tasklog" - "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/flytestdlib/utils" ) const ( @@ -66,7 +66,7 @@ func (p daskResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC } daskJob := plugins.DaskJob{} - err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &daskJob) + err = utils.UnmarshalStructToPb(taskTemplate.GetCustom(), &daskJob) if err != nil { return nil, errors.Wrapf(errors.BadTaskSpecification, err, "invalid TaskSpecification [%v], failed to unmarshal", taskTemplate.GetCustom()) } @@ -85,13 +85,13 @@ func (p daskResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC mergeMapInto(taskCtx.TaskExecutionMetadata().GetAnnotations(), objectMeta.Annotations) mergeMapInto(taskCtx.TaskExecutionMetadata().GetLabels(), objectMeta.Labels) - workerSpec, err := createWorkerSpec(*daskJob.Workers, podSpec, primaryContainerName) + workerSpec, err := createWorkerSpec(daskJob.GetWorkers(), podSpec, primaryContainerName) if err != nil { return nil, err } clusterName := taskCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() - schedulerSpec, err := createSchedulerSpec(*daskJob.Scheduler, clusterName, nonInterruptiblePodSpec, primaryContainerName) + schedulerSpec, err := createSchedulerSpec(daskJob.GetScheduler(), clusterName, nonInterruptiblePodSpec, primaryContainerName) if err != nil { return nil, err } @@ -112,7 +112,7 @@ func (p daskResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC return job, nil } -func createWorkerSpec(cluster plugins.DaskWorkerGroup, podSpec *v1.PodSpec, primaryContainerName string) (*daskAPI.WorkerSpec, error) { +func createWorkerSpec(cluster *plugins.DaskWorkerGroup, podSpec *v1.PodSpec, primaryContainerName string) (*daskAPI.WorkerSpec, error) { workerPodSpec := podSpec.DeepCopy() primaryContainer, err := flytek8s.GetContainer(workerPodSpec, primaryContainerName) if err != nil { @@ -128,7 +128,7 @@ func createWorkerSpec(cluster 
plugins.DaskWorkerGroup, podSpec *v1.PodSpec, prim // Set custom resources resources := &primaryContainer.Resources clusterResources := cluster.GetResources() - if len(clusterResources.Requests) >= 1 || len(clusterResources.Limits) >= 1 { + if len(clusterResources.GetRequests()) >= 1 || len(clusterResources.GetLimits()) >= 1 { resources, err = flytek8s.ToK8sResourceRequirements(cluster.GetResources()) if err != nil { return nil, err @@ -174,7 +174,7 @@ func createWorkerSpec(cluster plugins.DaskWorkerGroup, podSpec *v1.PodSpec, prim }, nil } -func createSchedulerSpec(scheduler plugins.DaskScheduler, clusterName string, podSpec *v1.PodSpec, primaryContainerName string) (*daskAPI.SchedulerSpec, error) { +func createSchedulerSpec(scheduler *plugins.DaskScheduler, clusterName string, podSpec *v1.PodSpec, primaryContainerName string) (*daskAPI.SchedulerSpec, error) { schedulerPodSpec := podSpec.DeepCopy() primaryContainer, err := flytek8s.GetContainer(schedulerPodSpec, primaryContainerName) if err != nil { @@ -190,7 +190,7 @@ func createSchedulerSpec(scheduler plugins.DaskScheduler, clusterName string, po // Override resources if applicable resources := &primaryContainer.Resources schedulerResources := scheduler.GetResources() - if len(schedulerResources.Requests) >= 1 || len(schedulerResources.Limits) >= 1 { + if len(schedulerResources.GetRequests()) >= 1 || len(schedulerResources.GetLimits()) >= 1 { resources, err = flytek8s.ToK8sResourceRequirements(scheduler.GetResources()) if err != nil { return nil, err diff --git a/flyteplugins/go/tasks/plugins/k8s/dask/dask_test.go b/flyteplugins/go/tasks/plugins/k8s/dask/dask_test.go index eba53067ef..bc8b4adef4 100644 --- a/flyteplugins/go/tasks/plugins/k8s/dask/dask_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/dask/dask_test.go @@ -525,9 +525,10 @@ func TestBuildResouceDaskUsePodTemplate(t *testing.T) { func TestBuildResourceDaskExtendedResources(t *testing.T) { assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ - GpuDeviceNodeLabel: "gpu-node-label", - GpuPartitionSizeNodeLabel: "gpu-partition-size", - GpuResourceName: flytek8s.ResourceNvidiaGPU, + GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuResourceName: flytek8s.ResourceNvidiaGPU, + AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"}, })) fixtures := []struct { @@ -569,6 +570,11 @@ func TestBuildResourceDaskExtendedResources(t *testing.T) { Operator: v1.TolerationOpEqual, Effect: v1.TaintEffectNoSchedule, }, + { + Key: "nvidia.com/gpu", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, }, }, { @@ -620,6 +626,11 @@ func TestBuildResourceDaskExtendedResources(t *testing.T) { Operator: v1.TolerationOpEqual, Effect: v1.TaintEffectNoSchedule, }, + { + Key: "nvidia.com/gpu", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, }, }, } diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go index d0e154835c..9196c788cc 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go @@ -178,7 +178,7 @@ func TestGetLogs(t *testing.T) { jobLogs, err := GetLogs(taskCtx, MPITaskType, mpiJobObjectMeta, false, workers, launcher, 0, 0) assert.NoError(t, err) assert.Equal(t, 1, len(jobLogs)) - assert.Equal(t, 
fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", "mpi-namespace", "test"), jobLogs[0].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", "mpi-namespace", "test"), jobLogs[0].GetUri()) pytorchJobObjectMeta := meta_v1.ObjectMeta{ Name: "test", @@ -187,8 +187,8 @@ func TestGetLogs(t *testing.T) { jobLogs, err = GetLogs(taskCtx, PytorchTaskType, pytorchJobObjectMeta, true, workers, launcher, 0, 0) assert.NoError(t, err) assert.Equal(t, 2, len(jobLogs)) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[0].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[1].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[0].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[1].GetUri()) tensorflowJobObjectMeta := meta_v1.ObjectMeta{ Name: "test", @@ -197,9 +197,9 @@ func TestGetLogs(t *testing.T) { jobLogs, err = GetLogs(taskCtx, TensorflowTaskType, tensorflowJobObjectMeta, false, workers, launcher, 1, 0) assert.NoError(t, err) assert.Equal(t, 3, len(jobLogs)) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[0].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[1].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[2].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[0].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[1].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[2].GetUri()) } @@ -221,8 +221,8 @@ func TestGetLogsTemplateUri(t *testing.T) { jobLogs, err := GetLogs(taskCtx, PytorchTaskType, pytorchJobObjectMeta, true, 1, 0, 0, 0) assert.NoError(t, err) assert.Equal(t, 2, len(jobLogs)) - assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-master-0×tamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[0].Uri) - assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-worker-0×tamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[1].Uri) + assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-master-0×tamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[0].GetUri()) + assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-worker-0×tamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[1].GetUri()) } func dummyPodSpec() v1.PodSpec { diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go index 53e4d30ccb..7ba2c0cb86 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go @@ -60,7 
+60,7 @@ func (mpiOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx plu var launcherReplicaSpec, workerReplicaSpec *commonOp.ReplicaSpec - if taskTemplate.TaskTypeVersion == 0 { + if taskTemplate.GetTaskTypeVersion() == 0 { mpiTaskExtraArgs := plugins.DistributedMPITrainingTask{} err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &mpiTaskExtraArgs) if err != nil { @@ -98,7 +98,7 @@ func (mpiOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx plu } } - } else if taskTemplate.TaskTypeVersion == 1 { + } else if taskTemplate.GetTaskTypeVersion() == 1 { kfMPITaskExtraArgs := kfplugins.DistributedMPITrainingTask{} err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfMPITaskExtraArgs) @@ -122,7 +122,7 @@ func (mpiOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx plu } else { return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, - "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion) + "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.GetTaskTypeVersion()) } if *workerReplicaSpec.Replicas <= 0 { diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go index 6c0080d45a..346b34adb6 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go @@ -368,9 +368,10 @@ func TestBuildResourceMPIForWrongInput(t *testing.T) { func TestBuildResourceMPIExtendedResources(t *testing.T) { assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{ - GpuDeviceNodeLabel: "gpu-node-label", - GpuPartitionSizeNodeLabel: "gpu-partition-size", - GpuResourceName: flytek8s.ResourceNvidiaGPU, + GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuResourceName: flytek8s.ResourceNvidiaGPU, + AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"}, })) fixtures := []struct { @@ -412,6 +413,11 @@ func TestBuildResourceMPIExtendedResources(t *testing.T) { Operator: corev1.TolerationOpEqual, Effect: corev1.TaintEffectNoSchedule, }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, }, }, { @@ -463,6 +469,11 @@ func TestBuildResourceMPIExtendedResources(t *testing.T) { Operator: corev1.TolerationOpEqual, Effect: corev1.TaintEffectNoSchedule, }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, }, }, } @@ -569,8 +580,8 @@ func TestGetLogs(t *testing.T) { jobLogs, err := common.GetLogs(taskCtx, common.MPITaskType, mpiJob.ObjectMeta, false, workers, launcher, 0, 0) assert.NoError(t, err) assert.Equal(t, 2, len(jobLogs)) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[0].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[1].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[0].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[1].GetUri()) } func TestGetProperties(t *testing.T) { diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go index 
6d7c80a7fd..0ee3f3570f 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go @@ -61,7 +61,7 @@ func (pytorchOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx var masterReplicaSpec, workerReplicaSpec *commonOp.ReplicaSpec - if taskTemplate.TaskTypeVersion == 0 { + if taskTemplate.GetTaskTypeVersion() == 0 { pytorchTaskExtraArgs := plugins.DistributedPyTorchTrainingTask{} err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &pytorchTaskExtraArgs) @@ -85,7 +85,7 @@ func (pytorchOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx if elasticConfig != nil { elasticPolicy = ParseElasticConfig(elasticConfig) } - } else if taskTemplate.TaskTypeVersion == 1 { + } else if taskTemplate.GetTaskTypeVersion() == 1 { kfPytorchTaskExtraArgs := kfplugins.DistributedPyTorchTrainingTask{} err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfPytorchTaskExtraArgs) @@ -132,7 +132,7 @@ func (pytorchOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx } } else { return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, - "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion) + "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.GetTaskTypeVersion()) } if *workerReplicaSpec.Replicas <= 0 { diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go index 814b340fe6..0f38bb2851 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go @@ -473,9 +473,10 @@ func TestBuildResourcePytorchContainerImage(t *testing.T) { func TestBuildResourcePytorchExtendedResources(t *testing.T) { assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{ - GpuDeviceNodeLabel: "gpu-node-label", - GpuPartitionSizeNodeLabel: "gpu-partition-size", - GpuResourceName: flytek8s.ResourceNvidiaGPU, + GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuResourceName: flytek8s.ResourceNvidiaGPU, + AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"}, })) fixtures := []struct { @@ -517,6 +518,11 @@ func TestBuildResourcePytorchExtendedResources(t *testing.T) { Operator: corev1.TolerationOpEqual, Effect: corev1.TaintEffectNoSchedule, }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, }, }, { @@ -568,6 +574,11 @@ func TestBuildResourcePytorchExtendedResources(t *testing.T) { Operator: corev1.TolerationOpEqual, Effect: corev1.TaintEffectNoSchedule, }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, }, }, } @@ -688,9 +699,9 @@ func TestGetLogs(t *testing.T) { jobLogs, err := common.GetLogs(taskCtx, common.PytorchTaskType, pytorchJob.ObjectMeta, hasMaster, workers, 0, 0, 0) assert.NoError(t, err) assert.Equal(t, 3, len(jobLogs)) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[2].Uri) + 
assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[2].GetUri()) } func TestGetLogsElastic(t *testing.T) { @@ -708,8 +719,8 @@ func TestGetLogsElastic(t *testing.T) { jobLogs, err := common.GetLogs(taskCtx, common.PytorchTaskType, pytorchJob.ObjectMeta, hasMaster, workers, 0, 0, 0) assert.NoError(t, err) assert.Equal(t, 2, len(jobLogs)) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].GetUri()) } func TestGetProperties(t *testing.T) { diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go index 93b4d91cd2..3c0a3e9485 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go @@ -55,7 +55,7 @@ func (tensorflowOperatorResourceHandler) BuildResource(ctx context.Context, task replicaSpecMap := make(map[commonOp.ReplicaType]*commonOp.ReplicaSpec) runPolicy := commonOp.RunPolicy{} - if taskTemplate.TaskTypeVersion == 0 { + if taskTemplate.GetTaskTypeVersion() == 0 { tensorflowTaskExtraArgs := plugins.DistributedTensorflowTrainingTask{} err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &tensorflowTaskExtraArgs) @@ -83,7 +83,7 @@ func (tensorflowOperatorResourceHandler) BuildResource(ctx context.Context, task } } - } else if taskTemplate.TaskTypeVersion == 1 { + } else if taskTemplate.GetTaskTypeVersion() == 1 { kfTensorflowTaskExtraArgs := kfplugins.DistributedTensorflowTrainingTask{} err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfTensorflowTaskExtraArgs) @@ -125,7 +125,7 @@ func (tensorflowOperatorResourceHandler) BuildResource(ctx context.Context, task } else { return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, - "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion) + "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.GetTaskTypeVersion()) } if v, ok := replicaSpecMap[kubeflowv1.TFJobReplicaTypeWorker]; !ok || *v.Replicas <= 0 { diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go index d4d6e6da17..22b750c22b 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go @@ -628,11 +628,11 @@ func TestGetLogs(t *testing.T) { workers, psReplicas, chiefReplicas, evaluatorReplicas) assert.NoError(t, err) assert.Equal(t, 5, len(jobLogs)) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", jobNamespace, 
jobName), jobLogs[0].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[1].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[2].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[3].Uri) - assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-evaluatorReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[4].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[0].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[1].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[2].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[3].GetUri()) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-evaluatorReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[4].GetUri()) } func TestGetProperties(t *testing.T) { diff --git a/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go b/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go index 2a08cd0e6c..60b0d5c8d5 100644 --- a/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go +++ b/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go @@ -59,7 +59,7 @@ func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecu } primaryContainerName := "" - if taskTemplate.Type == SidecarTaskType && taskTemplate.TaskTypeVersion == 0 { + if taskTemplate.GetType() == SidecarTaskType && taskTemplate.GetTaskTypeVersion() == 0 { // handles pod tasks when they are defined as Sidecar tasks and marshal the podspec using k8s proto. sidecarJob := sidecarJob{} err := utils.UnmarshalStructToObj(taskTemplate.GetCustom(), &sidecarJob) @@ -79,7 +79,7 @@ func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecu // update annotations and labels objectMeta.Annotations = utils.UnionMaps(objectMeta.Annotations, sidecarJob.Annotations) objectMeta.Labels = utils.UnionMaps(objectMeta.Labels, sidecarJob.Labels) - } else if taskTemplate.Type == SidecarTaskType && taskTemplate.TaskTypeVersion == 1 { + } else if taskTemplate.GetType() == SidecarTaskType && taskTemplate.GetTaskTypeVersion() == 1 { // handles pod tasks that marshal the pod spec to the task custom. 
err := utils.UnmarshalStructToObj(taskTemplate.GetCustom(), &podSpec) if err != nil { @@ -100,9 +100,9 @@ func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecu } // update annotations and labels - if taskTemplate.GetK8SPod() != nil && taskTemplate.GetK8SPod().Metadata != nil { - objectMeta.Annotations = utils.UnionMaps(objectMeta.Annotations, taskTemplate.GetK8SPod().Metadata.Annotations) - objectMeta.Labels = utils.UnionMaps(objectMeta.Labels, taskTemplate.GetK8SPod().Metadata.Labels) + if taskTemplate.GetK8SPod() != nil && taskTemplate.GetK8SPod().GetMetadata() != nil { + objectMeta.Annotations = utils.UnionMaps(objectMeta.Annotations, taskTemplate.GetK8SPod().GetMetadata().GetAnnotations()) + objectMeta.Labels = utils.UnionMaps(objectMeta.Labels, taskTemplate.GetK8SPod().GetMetadata().GetLabels()) } } else { // handles both container / pod tasks that use the TaskTemplate Container and K8sPod fields @@ -122,7 +122,7 @@ func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecu // set primaryContainerKey annotation if this is a Sidecar task or, as an optimization, if there is only a single // container. this plugin marks the task complete if the primary Container is complete, so if there is only one // container we can mark the task as complete before the Pod has been marked complete. - if taskTemplate.Type == SidecarTaskType || len(podSpec.Containers) == 1 { + if taskTemplate.GetType() == SidecarTaskType || len(podSpec.Containers) == 1 { objectMeta.Annotations[flytek8s.PrimaryContainerKey] = primaryContainerName } diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/config.go b/flyteplugins/go/tasks/plugins/k8s/ray/config.go index 9a05f98f25..3f79ed958e 100644 --- a/flyteplugins/go/tasks/plugins/k8s/ray/config.go +++ b/flyteplugins/go/tasks/plugins/k8s/ray/config.go @@ -22,7 +22,7 @@ var ( IncludeDashboard: true, DashboardHost: "0.0.0.0", EnableUsageStats: false, - ServiceAccount: "default", + ServiceAccount: "", Defaults: DefaultConfig{ HeadNode: NodeConfig{ StartParameters: map[string]string{ diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/config_test.go b/flyteplugins/go/tasks/plugins/k8s/ray/config_test.go index d19c047ca6..07b0c2842a 100644 --- a/flyteplugins/go/tasks/plugins/k8s/ray/config_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/ray/config_test.go @@ -32,6 +32,6 @@ func TestLoadDefaultServiceAccountConfig(t *testing.T) { t.Run("serviceAccount", func(t *testing.T) { config := GetConfig() - assert.Equal(t, config.ServiceAccount, "default") + assert.Equal(t, config.ServiceAccount, "") }) } diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go index 95a87f4efa..d264b69c9e 100644 --- a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go +++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go @@ -94,8 +94,8 @@ func (rayJobResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC cfg := GetConfig() headNodeRayStartParams := make(map[string]string) - if rayJob.RayCluster.HeadGroupSpec != nil && rayJob.RayCluster.HeadGroupSpec.RayStartParams != nil { - headNodeRayStartParams = rayJob.RayCluster.HeadGroupSpec.RayStartParams + if rayJob.GetRayCluster().GetHeadGroupSpec() != nil && rayJob.RayCluster.HeadGroupSpec.RayStartParams != nil { + headNodeRayStartParams = rayJob.GetRayCluster().GetHeadGroupSpec().GetRayStartParams() } else if headNode := cfg.Defaults.HeadNode; len(headNode.StartParameters) > 0 { headNodeRayStartParams = headNode.StartParameters } @@ -118,24 +118,30 @@ func 
(rayJobResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC podSpec.ServiceAccountName = cfg.ServiceAccount - headPodSpec := podSpec.DeepCopy() - - rayjob, err := constructRayJob(taskCtx, rayJob, objectMeta, *podSpec, headPodSpec, headNodeRayStartParams, primaryContainerIdx, *primaryContainer) + rayjob, err := constructRayJob(taskCtx, &rayJob, objectMeta, *podSpec, headNodeRayStartParams, primaryContainerIdx, *primaryContainer) return rayjob, err } -func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob plugins.RayJob, objectMeta *metav1.ObjectMeta, podSpec v1.PodSpec, headPodSpec *v1.PodSpec, headNodeRayStartParams map[string]string, primaryContainerIdx int, primaryContainer v1.Container) (*rayv1.RayJob, error) { +func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob *plugins.RayJob, objectMeta *metav1.ObjectMeta, taskPodSpec v1.PodSpec, headNodeRayStartParams map[string]string, primaryContainerIdx int, primaryContainer v1.Container) (*rayv1.RayJob, error) { enableIngress := true cfg := GetConfig() + + headPodSpec := taskPodSpec.DeepCopy() + headPodTemplate, err := buildHeadPodTemplate( + &headPodSpec.Containers[primaryContainerIdx], + headPodSpec, + objectMeta, + taskCtx, + rayJob.GetRayCluster().GetHeadGroupSpec(), + ) + if err != nil { + return nil, err + } + rayClusterSpec := rayv1.RayClusterSpec{ HeadGroupSpec: rayv1.HeadGroupSpec{ - Template: buildHeadPodTemplate( - &headPodSpec.Containers[primaryContainerIdx], - headPodSpec, - objectMeta, - taskCtx, - ), + Template: headPodTemplate, ServiceType: v1.ServiceType(cfg.ServiceType), EnableIngress: &enableIngress, RayStartParams: headNodeRayStartParams, @@ -144,18 +150,22 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob plugins.Ra EnableInTreeAutoscaling: &rayJob.RayCluster.EnableAutoscaling, } - for _, spec := range rayJob.RayCluster.WorkerGroupSpec { - workerPodSpec := podSpec.DeepCopy() - workerPodTemplate := buildWorkerPodTemplate( + for _, spec := range rayJob.GetRayCluster().GetWorkerGroupSpec() { + workerPodSpec := taskPodSpec.DeepCopy() + workerPodTemplate, err := buildWorkerPodTemplate( &workerPodSpec.Containers[primaryContainerIdx], workerPodSpec, objectMeta, taskCtx, + spec, ) + if err != nil { + return nil, err + } workerNodeRayStartParams := make(map[string]string) if spec.RayStartParams != nil { - workerNodeRayStartParams = spec.RayStartParams + workerNodeRayStartParams = spec.GetRayStartParams() } else if workerNode := cfg.Defaults.WorkerNode; len(workerNode.StartParameters) > 0 { workerNodeRayStartParams = workerNode.StartParameters } @@ -168,17 +178,17 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob plugins.Ra workerNodeRayStartParams[DisableUsageStatsStartParameter] = DisableUsageStatsStartParameterVal } - minReplicas := spec.MinReplicas - if minReplicas > spec.Replicas { - minReplicas = spec.Replicas + minReplicas := spec.GetMinReplicas() + if minReplicas > spec.GetReplicas() { + minReplicas = spec.GetReplicas() } - maxReplicas := spec.MaxReplicas - if maxReplicas < spec.Replicas { - maxReplicas = spec.Replicas + maxReplicas := spec.GetMaxReplicas() + if maxReplicas < spec.GetReplicas() { + maxReplicas = spec.GetReplicas() } workerNodeSpec := rayv1.WorkerGroupSpec{ - GroupName: spec.GroupName, + GroupName: spec.GetGroupName(), MinReplicas: &minReplicas, MaxReplicas: &maxReplicas, Replicas: &spec.Replicas, @@ -190,7 +200,7 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob plugins.Ra } serviceAccountName 
:= flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(taskCtx.TaskExecutionMetadata()) - if len(serviceAccountName) == 0 { + if len(serviceAccountName) == 0 || cfg.ServiceAccount != "" { serviceAccountName = cfg.ServiceAccount } @@ -201,20 +211,20 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob plugins.Ra shutdownAfterJobFinishes := cfg.ShutdownAfterJobFinishes ttlSecondsAfterFinished := &cfg.TTLSecondsAfterFinished - if rayJob.ShutdownAfterJobFinishes { + if rayJob.GetShutdownAfterJobFinishes() { shutdownAfterJobFinishes = true ttlSecondsAfterFinished = &rayJob.TtlSecondsAfterFinished } - submitterPodTemplate := buildSubmitterPodTemplate(headPodSpec, objectMeta, taskCtx) + submitterPodSpec := taskPodSpec.DeepCopy() + submitterPodTemplate := buildSubmitterPodTemplate(submitterPodSpec, objectMeta, taskCtx) // TODO: This is for backward compatibility. Remove this block once runtime_env is removed from ray proto. - var err error var runtimeEnvYaml string - runtimeEnvYaml = rayJob.RuntimeEnvYaml + runtimeEnvYaml = rayJob.GetRuntimeEnvYaml() // If runtime_env exists but runtime_env_yaml does not, convert runtime_env to runtime_env_yaml - if rayJob.RuntimeEnv != "" && rayJob.RuntimeEnvYaml == "" { - runtimeEnvYaml, err = convertBase64RuntimeEnvToYaml(rayJob.RuntimeEnv) + if rayJob.GetRuntimeEnv() != "" && rayJob.GetRuntimeEnvYaml() == "" { + runtimeEnvYaml, err = convertBase64RuntimeEnvToYaml(rayJob.GetRuntimeEnv()) if err != nil { return nil, err } @@ -319,7 +329,7 @@ func injectLogsSidecar(primaryContainer *v1.Container, podSpec *v1.PodSpec) { podSpec.Containers = append(podSpec.Containers, *sidecar) } -func buildHeadPodTemplate(primaryContainer *v1.Container, podSpec *v1.PodSpec, objectMeta *metav1.ObjectMeta, taskCtx pluginsCore.TaskExecutionContext) v1.PodTemplateSpec { +func buildHeadPodTemplate(primaryContainer *v1.Container, basePodSpec *v1.PodSpec, objectMeta *metav1.ObjectMeta, taskCtx pluginsCore.TaskExecutionContext, spec *plugins.HeadGroupSpec) (v1.PodTemplateSpec, error) { // Some configs are copy from https://github.com/ray-project/kuberay/blob/b72e6bdcd9b8c77a9dc6b5da8560910f3a0c3ffd/apiserver/pkg/util/cluster.go#L97 // They should always be the same, so we could hard code here. primaryContainer.Name = "ray-head" @@ -357,30 +367,40 @@ func buildHeadPodTemplate(primaryContainer *v1.Container, podSpec *v1.PodSpec, o primaryContainer.Ports = append(primaryContainer.Ports, ports...) 
// Inject a sidecar for capturing and exposing Ray job logs - injectLogsSidecar(primaryContainer, podSpec) + injectLogsSidecar(primaryContainer, basePodSpec) + + basePodSpec, err := mergeCustomPodSpec(primaryContainer, basePodSpec, spec.GetK8SPod()) + if err != nil { + return v1.PodTemplateSpec{}, err + } + + basePodSpec = flytek8s.AddTolerationsForExtendedResources(basePodSpec) podTemplateSpec := v1.PodTemplateSpec{ - Spec: *podSpec, + Spec: *basePodSpec, ObjectMeta: *objectMeta, } cfg := config.GetK8sPluginConfig() podTemplateSpec.SetLabels(utils.UnionMaps(cfg.DefaultLabels, podTemplateSpec.GetLabels(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetLabels()))) podTemplateSpec.SetAnnotations(utils.UnionMaps(cfg.DefaultAnnotations, podTemplateSpec.GetAnnotations(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetAnnotations()))) - return podTemplateSpec + return podTemplateSpec, nil } func buildSubmitterPodTemplate(podSpec *v1.PodSpec, objectMeta *metav1.ObjectMeta, taskCtx pluginsCore.TaskExecutionContext) v1.PodTemplateSpec { + submitterPodSpec := podSpec.DeepCopy() + podTemplateSpec := v1.PodTemplateSpec{ - Spec: *podSpec, ObjectMeta: *objectMeta, + Spec: *submitterPodSpec, } + cfg := config.GetK8sPluginConfig() podTemplateSpec.SetLabels(utils.UnionMaps(cfg.DefaultLabels, podTemplateSpec.GetLabels(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetLabels()))) podTemplateSpec.SetAnnotations(utils.UnionMaps(cfg.DefaultAnnotations, podTemplateSpec.GetAnnotations(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetAnnotations()))) return podTemplateSpec } -func buildWorkerPodTemplate(primaryContainer *v1.Container, podSpec *v1.PodSpec, objectMetadata *metav1.ObjectMeta, taskCtx pluginsCore.TaskExecutionContext) v1.PodTemplateSpec { +func buildWorkerPodTemplate(primaryContainer *v1.Container, basePodSpec *v1.PodSpec, objectMetadata *metav1.ObjectMeta, taskCtx pluginsCore.TaskExecutionContext, spec *plugins.WorkerGroupSpec) (v1.PodTemplateSpec, error) { // Some configs are copy from https://github.com/ray-project/kuberay/blob/b72e6bdcd9b8c77a9dc6b5da8560910f3a0c3ffd/apiserver/pkg/util/cluster.go#L185 // They should always be the same, so we could hard code here. @@ -479,13 +499,52 @@ func buildWorkerPodTemplate(primaryContainer *v1.Container, podSpec *v1.PodSpec, } primaryContainer.Ports = append(primaryContainer.Ports, ports...) 
+ basePodSpec, err := mergeCustomPodSpec(primaryContainer, basePodSpec, spec.GetK8SPod()) + if err != nil { + return v1.PodTemplateSpec{}, err + } + + basePodSpec = flytek8s.AddTolerationsForExtendedResources(basePodSpec) + podTemplateSpec := v1.PodTemplateSpec{ - Spec: *podSpec, + Spec: *basePodSpec, ObjectMeta: *objectMetadata, } podTemplateSpec.SetLabels(utils.UnionMaps(podTemplateSpec.GetLabels(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetLabels()))) podTemplateSpec.SetAnnotations(utils.UnionMaps(podTemplateSpec.GetAnnotations(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetAnnotations()))) - return podTemplateSpec + return podTemplateSpec, nil +} + +// Merges a ray head/worker node's custom pod spec onto the task's generated pod spec +func mergeCustomPodSpec(primaryContainer *v1.Container, podSpec *v1.PodSpec, k8sPod *core.K8SPod) (*v1.PodSpec, error) { + if k8sPod == nil { + return podSpec, nil + } + + if k8sPod.GetPodSpec() == nil { + return podSpec, nil + } + + var customPodSpec *v1.PodSpec + + err := utils.UnmarshalStructToObj(k8sPod.GetPodSpec(), &customPodSpec) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, + "Unable to unmarshal pod spec [%v], Err: [%v]", k8sPod.GetPodSpec(), err.Error()) + } + + for _, container := range customPodSpec.Containers { + if container.Name != primaryContainer.Name { // Only support the primary container for now + continue + } + + // Just handle resources for now + if len(container.Resources.Requests) > 0 || len(container.Resources.Limits) > 0 { + primaryContainer.Resources = container.Resources + } + } + + return podSpec, nil } func (rayJobResourceHandler) BuildIdentityResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionMetadata) (client.Object, error) { diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go index 38b2f56785..40aa509f54 100644 --- a/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go @@ -2,18 +2,19 @@ package ray import ( "context" + "encoding/json" "reflect" "testing" "time" - structpb "github.com/golang/protobuf/ptypes/struct" rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/structpb" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins" @@ -148,7 +149,7 @@ func dummyRayTaskContext(taskTemplate *core.TaskTemplate, resources *corev1.Reso taskExecutionMetadata.OnGetNamespace().Return("test-namespace") taskExecutionMetadata.OnGetAnnotations().Return(map[string]string{"annotation-1": "val1"}) taskExecutionMetadata.OnGetLabels().Return(map[string]string{"label-1": "val1"}) - taskExecutionMetadata.OnGetOwnerReference().Return(v1.OwnerReference{ + taskExecutionMetadata.OnGetOwnerReference().Return(metav1.OwnerReference{ Kind: "node", Name: "blah", }) @@ -279,9 +280,10 @@ func TestBuildResourceRayContainerImage(t *testing.T) { func TestBuildResourceRayExtendedResources(t *testing.T) { assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ - GpuDeviceNodeLabel: "gpu-node-label", - GpuPartitionSizeNodeLabel: "gpu-partition-size", - GpuResourceName: flytek8s.ResourceNvidiaGPU, +
GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuResourceName: flytek8s.ResourceNvidiaGPU, + AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"}, })) params := []struct { @@ -323,6 +325,11 @@ func TestBuildResourceRayExtendedResources(t *testing.T) { Operator: corev1.TolerationOpEqual, Effect: corev1.TaintEffectNoSchedule, }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, }, }, { @@ -374,6 +381,11 @@ func TestBuildResourceRayExtendedResources(t *testing.T) { Operator: corev1.TolerationOpEqual, Effect: corev1.TaintEffectNoSchedule, }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, }, }, } @@ -419,6 +431,123 @@ func TestBuildResourceRayExtendedResources(t *testing.T) { } } +func TestBuildResourceRayCustomK8SPod(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{})) + + headResourceEntries := []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "10"}, + {Name: core.Resources_MEMORY, Value: "10Gi"}, + {Name: core.Resources_GPU, Value: "10"}, + } + headResources := &core.Resources{Requests: headResourceEntries, Limits: headResourceEntries} + + expectedHeadResources, err := flytek8s.ToK8sResourceRequirements(headResources) + require.NoError(t, err) + + workerResourceEntries := []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "20"}, + {Name: core.Resources_MEMORY, Value: "20Gi"}, + {Name: core.Resources_GPU, Value: "20"}, + } + workerResources := &core.Resources{Requests: workerResourceEntries, Limits: workerResourceEntries} + + expectedWorkerResources, err := flytek8s.ToK8sResourceRequirements(workerResources) + require.NoError(t, err) + + headPodSpec := &corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ray-head", + Resources: *expectedHeadResources, + }, + }, + } + workerPodSpec := &corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ray-worker", + Resources: *expectedWorkerResources, + }, + }, + } + + params := []struct { + name string + taskResources *corev1.ResourceRequirements + headK8SPod *core.K8SPod + workerK8SPod *core.K8SPod + expectedSubmitterResources *corev1.ResourceRequirements + expectedHeadResources *corev1.ResourceRequirements + expectedWorkerResources *corev1.ResourceRequirements + }{ + { + name: "task resources", + taskResources: resourceRequirements, + expectedSubmitterResources: resourceRequirements, + expectedHeadResources: resourceRequirements, + expectedWorkerResources: resourceRequirements, + }, + { + name: "custom worker and head resources", + taskResources: resourceRequirements, + headK8SPod: &core.K8SPod{ + PodSpec: transformStructToStructPB(t, headPodSpec), + }, + workerK8SPod: &core.K8SPod{ + PodSpec: transformStructToStructPB(t, workerPodSpec), + }, + expectedSubmitterResources: resourceRequirements, + expectedHeadResources: expectedHeadResources, + expectedWorkerResources: expectedWorkerResources, + }, + } + + for _, p := range params { + t.Run(p.name, func(t *testing.T) { + rayJobInput := dummyRayCustomObj() + + if p.headK8SPod != nil { + rayJobInput.RayCluster.HeadGroupSpec.K8SPod = p.headK8SPod + } + + if p.workerK8SPod != nil { + for _, spec := range rayJobInput.GetRayCluster().GetWorkerGroupSpec() { + spec.K8SPod = p.workerK8SPod + } + } + + taskTemplate := dummyRayTaskTemplate("ray-id", rayJobInput) + taskContext := dummyRayTaskContext(taskTemplate, p.taskResources, nil, "", 
serviceAccount) + rayJobResourceHandler := rayJobResourceHandler{} + r, err := rayJobResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + rayJob, ok := r.(*rayv1.RayJob) + assert.True(t, ok) + + submitterPodResources := rayJob.Spec.SubmitterPodTemplate.Spec.Containers[0].Resources + assert.EqualValues(t, + p.expectedSubmitterResources, + &submitterPodResources, + ) + + headPodResources := rayJob.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.Containers[0].Resources + assert.EqualValues(t, + p.expectedHeadResources, + &headPodResources, + ) + + for _, workerGroupSpec := range rayJob.Spec.RayClusterSpec.WorkerGroupSpecs { + workerPodResources := workerGroupSpec.Template.Spec.Containers[0].Resources + assert.EqualValues(t, + p.expectedWorkerResources, + &workerPodResources, + ) + } + }) + } +} + func TestDefaultStartParameters(t *testing.T) { rayJobResourceHandler := rayJobResourceHandler{} rayJob := &plugins.RayJob{ @@ -1103,3 +1232,14 @@ func TestGetPropertiesRay(t *testing.T) { expected := k8s.PluginProperties{} assert.Equal(t, expected, rayJobResourceHandler.GetProperties()) } + +func transformStructToStructPB(t *testing.T, obj interface{}) *structpb.Struct { + data, err := json.Marshal(obj) + assert.Nil(t, err) + podSpecMap := make(map[string]interface{}) + err = json.Unmarshal(data, &podSpecMap) + assert.Nil(t, err) + s, err := structpb.NewStruct(podSpecMap) + assert.Nil(t, err) + return s +} diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/spark.go b/flyteplugins/go/tasks/plugins/k8s/spark/spark.go index 8b766a391a..6873fc2257 100644 --- a/flyteplugins/go/tasks/plugins/k8s/spark/spark.go +++ b/flyteplugins/go/tasks/plugins/k8s/spark/spark.go @@ -44,7 +44,7 @@ func validateSparkJob(sparkJob *plugins.SparkJob) error { return fmt.Errorf("empty sparkJob") } - if len(sparkJob.MainApplicationFile) == 0 && len(sparkJob.MainClass) == 0 { + if len(sparkJob.GetMainApplicationFile()) == 0 && len(sparkJob.GetMainClass()) == 0 { return fmt.Errorf("either MainApplicationFile or MainClass must be set") } @@ -262,10 +262,10 @@ func createSparkApplication(sparkJob *plugins.SparkJob, sparkConfig map[string]s app.Spec.BatchScheduler = &val } - if sparkJob.MainApplicationFile != "" { + if sparkJob.GetMainApplicationFile() != "" { app.Spec.MainApplicationFile = &sparkJob.MainApplicationFile } - if sparkJob.MainClass != "" { + if sparkJob.GetMainClass() != "" { app.Spec.MainClass = &sparkJob.MainClass } return app diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go b/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go index d657d4c273..0a6f51d0e2 100644 --- a/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go @@ -101,10 +101,10 @@ func TestGetEventInfo(t *testing.T) { info, err := getEventInfoForSpark(taskCtx, dummySparkApplication(sj.RunningState)) assert.NoError(t, err) assert.Len(t, info.Logs, 6) - assert.Equal(t, "https://spark-ui.flyte", info.CustomInfo.Fields[sparkDriverUI].GetStringValue()) + assert.Equal(t, "https://spark-ui.flyte", info.CustomInfo.GetFields()[sparkDriverUI].GetStringValue()) generatedLinks := make([]string, 0, len(info.Logs)) for _, l := range info.Logs { - generatedLinks = append(generatedLinks, l.Uri) + generatedLinks = append(generatedLinks, l.GetUri()) } expectedLinks := []string{ @@ -121,12 +121,12 @@ func TestGetEventInfo(t *testing.T) { info, err = getEventInfoForSpark(taskCtx, dummySparkApplication(sj.SubmittedState)) generatedLinks = 
make([]string, 0, len(info.Logs)) for _, l := range info.Logs { - generatedLinks = append(generatedLinks, l.Uri) + generatedLinks = append(generatedLinks, l.GetUri()) } assert.NoError(t, err) assert.Len(t, info.Logs, 5) assert.Equal(t, expectedLinks[:5], generatedLinks) // No Spark Driver UI for Submitted state - assert.True(t, info.Logs[4].ShowWhilePending) // All User Logs should be shown while pending + assert.True(t, info.Logs[4].GetShowWhilePending()) // All User Logs should be shown while pending assert.NoError(t, setSparkConfig(&Config{ SparkHistoryServerURL: "spark-history.flyte", @@ -151,10 +151,10 @@ func TestGetEventInfo(t *testing.T) { info, err = getEventInfoForSpark(taskCtx, dummySparkApplication(sj.FailedState)) assert.NoError(t, err) assert.Len(t, info.Logs, 5) - assert.Equal(t, "spark-history.flyte/history/app-id", info.CustomInfo.Fields[sparkHistoryUI].GetStringValue()) + assert.Equal(t, "spark-history.flyte/history/app-id", info.CustomInfo.GetFields()[sparkHistoryUI].GetStringValue()) generatedLinks = make([]string, 0, len(info.Logs)) for _, l := range info.Logs { - generatedLinks = append(generatedLinks, l.Uri) + generatedLinks = append(generatedLinks, l.GetUri()) } expectedLinks = []string{ @@ -853,7 +853,7 @@ func TestBuildResourcePodTemplate(t *testing.T) { assert.Equal(t, defaultConfig.DefaultEnvVars["foo"], findEnvVarByName(sparkApp.Spec.Driver.Env, "foo").Value) assert.Equal(t, defaultConfig.DefaultEnvVars["fooEnv"], findEnvVarByName(sparkApp.Spec.Driver.Env, "fooEnv").Value) assert.Equal(t, findEnvVarByName(dummyEnvVarsWithSecretRef, "SECRET"), findEnvVarByName(sparkApp.Spec.Driver.Env, "SECRET")) - assert.Equal(t, 10, len(sparkApp.Spec.Driver.Env)) + assert.Equal(t, 9, len(sparkApp.Spec.Driver.Env)) assert.Equal(t, testImage, *sparkApp.Spec.Driver.Image) assert.Equal(t, flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(taskCtx.TaskExecutionMetadata()), *sparkApp.Spec.Driver.ServiceAccount) assert.Equal(t, defaultConfig.DefaultPodSecurityContext, sparkApp.Spec.Driver.SecurityContenxt) @@ -890,7 +890,7 @@ func TestBuildResourcePodTemplate(t *testing.T) { assert.Equal(t, defaultConfig.DefaultEnvVars["foo"], findEnvVarByName(sparkApp.Spec.Executor.Env, "foo").Value) assert.Equal(t, defaultConfig.DefaultEnvVars["fooEnv"], findEnvVarByName(sparkApp.Spec.Executor.Env, "fooEnv").Value) assert.Equal(t, findEnvVarByName(dummyEnvVarsWithSecretRef, "SECRET"), findEnvVarByName(sparkApp.Spec.Executor.Env, "SECRET")) - assert.Equal(t, 10, len(sparkApp.Spec.Executor.Env)) + assert.Equal(t, 9, len(sparkApp.Spec.Executor.Env)) assert.Equal(t, testImage, *sparkApp.Spec.Executor.Image) assert.Equal(t, defaultConfig.DefaultPodSecurityContext, sparkApp.Spec.Executor.SecurityContenxt) assert.Equal(t, defaultConfig.DefaultPodDNSConfig, sparkApp.Spec.Executor.DNSConfig) diff --git a/flyteplugins/go/tasks/plugins/presto/execution_state.go b/flyteplugins/go/tasks/plugins/presto/execution_state.go index 3399c013ae..88edb30cb8 100644 --- a/flyteplugins/go/tasks/plugins/presto/execution_state.go +++ b/flyteplugins/go/tasks/plugins/presto/execution_state.go @@ -217,10 +217,10 @@ func GetQueryInfo(ctx context.Context, tCtx core.TaskExecutionContext) (string, } outputs, err := template.Render(ctx, []string{ - prestoQuery.RoutingGroup, - prestoQuery.Catalog, - prestoQuery.Schema, - prestoQuery.Statement, + prestoQuery.GetRoutingGroup(), + prestoQuery.GetCatalog(), + prestoQuery.GetSchema(), + prestoQuery.GetStatement(), }, template.Parameters{ TaskExecMetadata: 
tCtx.TaskExecutionMetadata(), Inputs: tCtx.InputReader(), @@ -241,7 +241,7 @@ func GetQueryInfo(ctx context.Context, tCtx core.TaskExecutionContext) (string, } func validatePrestoStatement(prestoJob plugins.PrestoQuery) error { - if prestoJob.Statement == "" { + if prestoJob.GetStatement() == "" { return errors.Errorf(errors.BadTaskSpecification, "Query could not be found. Please ensure that you are at least on Flytekit version 0.3.0 or later.") } @@ -440,7 +440,7 @@ func writeOutput(ctx context.Context, tCtx core.TaskExecutionContext, externalLo return err } - results := taskTemplate.Interface.Outputs.Variables["results"] + results := taskTemplate.GetInterface().GetOutputs().GetVariables()["results"] return tCtx.OutputWriter().Put(ctx, ioutils.NewInMemoryOutputReader( &pb.LiteralMap{ @@ -474,13 +474,13 @@ func MapExecutionStateToPhaseInfo(state ExecutionState) core.PhaseInfo { if state.CreationFailureCount > 5 { phaseInfo = core.PhaseInfoRetryableFailure("PrestoFailure", "Too many creation attempts", nil) } else { - phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+1), ConstructTaskInfo(state)) + phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+1), ConstructTaskInfo(state)) // #nosec G115 } case PhaseSubmitted: - phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+2), ConstructTaskInfo(state)) + phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+2), ConstructTaskInfo(state)) // #nosec G115 case PhaseQuerySucceeded: if state.QueryCount < 5 { - phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+3), ConstructTaskInfo(state)) + phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+3), ConstructTaskInfo(state)) // #nosec G115 } else { phaseInfo = core.PhaseInfoSuccess(ConstructTaskInfo(state)) } diff --git a/flyteplugins/go/tasks/plugins/presto/execution_state_test.go b/flyteplugins/go/tasks/plugins/presto/execution_state_test.go index 4d20d64ee6..e89f1af4ae 100644 --- a/flyteplugins/go/tasks/plugins/presto/execution_state_test.go +++ b/flyteplugins/go/tasks/plugins/presto/execution_state_test.go @@ -84,7 +84,7 @@ func TestConstructTaskLog(t *testing.T) { u, err := url.Parse(expected) assert.NoError(t, err) taskLog := ConstructTaskLog(ExecutionState{CommandID: "123", URI: u.String()}) - assert.Equal(t, expected, taskLog.Uri) + assert.Equal(t, expected, taskLog.GetUri()) } func TestConstructTaskInfo(t *testing.T) { @@ -103,7 +103,7 @@ func TestConstructTaskInfo(t *testing.T) { } taskInfo := ConstructTaskInfo(e) - assert.Equal(t, "https://prestoproxy-internal.flyteorg.net:443", taskInfo.Logs[0].Uri) + assert.Equal(t, "https://prestoproxy-internal.flyteorg.net:443", taskInfo.Logs[0].GetUri()) assert.Len(t, taskInfo.ExternalResources, 1) assert.Equal(t, taskInfo.ExternalResources[0].ExternalID, "123") } diff --git a/flyteplugins/go/tasks/plugins/presto/executions_cache.go b/flyteplugins/go/tasks/plugins/presto/executions_cache.go index cc5248c0f2..b41df763d0 100644 --- a/flyteplugins/go/tasks/plugins/presto/executions_cache.go +++ b/flyteplugins/go/tasks/plugins/presto/executions_cache.go @@ -36,7 +36,8 @@ func NewPrestoExecutionsCache( scope: scope, cfg: cfg, } - autoRefreshCache, err := cache.NewAutoRefreshCache(cfg.RefreshCacheConfig.Name, q.SyncPrestoQuery, workqueue.DefaultControllerRateLimiter(), cfg.RefreshCacheConfig.SyncPeriod.Duration, cfg.RefreshCacheConfig.Workers, cfg.RefreshCacheConfig.LruCacheSize, scope) + // #nosec G115 + autoRefreshCache, err := cache.NewAutoRefreshCache(cfg.RefreshCacheConfig.Name, q.SyncPrestoQuery, 
workqueue.DefaultControllerRateLimiter(), cfg.RefreshCacheConfig.SyncPeriod.Duration, uint(cfg.RefreshCacheConfig.Workers), uint(cfg.RefreshCacheConfig.LruCacheSize), scope) if err != nil { logger.Errorf(ctx, "Could not create AutoRefreshCache in Executor. [%s]", err) return q, errors.Wrapf(errors.CacheFailed, err, "Error creating AutoRefreshCache") diff --git a/flyteplugins/go/tasks/plugins/testing/echo.go b/flyteplugins/go/tasks/plugins/testing/echo.go index 09c4dc53b1..00ca339f20 100644 --- a/flyteplugins/go/tasks/plugins/testing/echo.go +++ b/flyteplugins/go/tasks/plugins/testing/echo.go @@ -104,7 +104,7 @@ func copyInputsToOutputs(ctx context.Context, tCtx core.TaskExecutionContext) (c outputLiterals := make(map[string]*idlcore.Literal, len(inputToOutputVariableMappings)) for inputVariableName, outputVariableName := range inputToOutputVariableMappings { - outputLiterals[outputVariableName] = inputLiterals.Literals[inputVariableName] + outputLiterals[outputVariableName] = inputLiterals.GetLiterals()[inputVariableName] } outputLiteralMap := &idlcore.LiteralMap{ @@ -132,12 +132,12 @@ func compileInputToOutputVariableMappings(ctx context.Context, tCtx core.TaskExe } var inputs, outputs map[string]*idlcore.Variable - if taskTemplate.Interface != nil { - if taskTemplate.Interface.Inputs != nil { - inputs = taskTemplate.Interface.Inputs.Variables + if taskTemplate.GetInterface() != nil { + if taskTemplate.GetInterface().GetInputs() != nil { + inputs = taskTemplate.GetInterface().GetInputs().GetVariables() } - if taskTemplate.Interface.Outputs != nil { - outputs = taskTemplate.Interface.Outputs.Variables + if taskTemplate.GetInterface().GetOutputs() != nil { + outputs = taskTemplate.GetInterface().GetOutputs().GetVariables() } } diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/client.go b/flyteplugins/go/tasks/plugins/webapi/agent/client.go index 148113fb38..04c464eaa3 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/client.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/client.go @@ -130,16 +130,16 @@ func getAgentRegistry(ctx context.Context, cs *ClientSet) Registry { agentSupportedTaskCategories := make(map[string]struct{}) for _, agent := range res.GetAgents() { - deprecatedSupportedTaskTypes := agent.SupportedTaskTypes + deprecatedSupportedTaskTypes := agent.GetSupportedTaskTypes() for _, supportedTaskType := range deprecatedSupportedTaskTypes { - agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.IsSync} + agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.GetIsSync()} newAgentRegistry[supportedTaskType] = map[int32]*Agent{defaultTaskTypeVersion: agent} agentSupportedTaskCategories[supportedTaskType] = struct{}{} } - supportedTaskCategories := agent.SupportedTaskCategories + supportedTaskCategories := agent.GetSupportedTaskCategories() for _, supportedCategory := range supportedTaskCategories { - agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.IsSync} + agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.GetIsSync()} supportedCategoryName := supportedCategory.GetName() newAgentRegistry[supportedCategoryName] = map[int32]*Agent{supportedCategory.GetVersion(): agent} agentSupportedTaskCategories[supportedCategoryName] = struct{}{} diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/integration_test.go b/flyteplugins/go/tasks/plugins/webapi/agent/integration_test.go index ba74fbf5d2..5348b71ebb 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/integration_test.go +++ 
b/flyteplugins/go/tasks/plugins/webapi/agent/integration_test.go @@ -261,7 +261,7 @@ func newMockAsyncAgentPlugin() webapi.PluginEntry { mockCreateRequestMatcher := mock.MatchedBy(func(request *admin.CreateTaskRequest) bool { expectedArgs := []string{"pyflyte-fast-execute", "--output-prefix", "/tmp/123"} - return slices.Equal(request.Template.GetContainer().Args, expectedArgs) + return slices.Equal(request.GetTemplate().GetContainer().GetArgs(), expectedArgs) }) asyncAgentClient.On("CreateTask", mock.Anything, mockCreateRequestMatcher).Return(&admin.CreateTaskResponse{ ResourceMeta: []byte{1, 2, 3, 4}}, nil) diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go index 4fffe2bee5..77a27b6699 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go @@ -45,6 +45,7 @@ type ResourceWrapper struct { Message string LogLinks []*flyteIdl.TaskLog CustomInfo *structpb.Struct + AgentError *admin.AgentError } // IsTerminal is used to avoid making network calls to the agent service if the resource is already in a terminal state. @@ -94,8 +95,8 @@ func (p *Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContext OutputPath: taskCtx.OutputWriter(), Task: taskCtx.TaskReader(), } - argTemplate = taskTemplate.GetContainer().Args - modifiedArgs, err := template.Render(ctx, taskTemplate.GetContainer().Args, templateParameters) + argTemplate = taskTemplate.GetContainer().GetArgs() + modifiedArgs, err := template.Render(ctx, taskTemplate.GetContainer().GetArgs(), templateParameters) if err != nil { return nil, nil, err } @@ -107,7 +108,7 @@ func (p *Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContext } outputPrefix := taskCtx.OutputWriter().GetOutputPrefixPath().String() - taskCategory := admin.TaskCategory{Name: taskTemplate.Type, Version: taskTemplate.TaskTypeVersion} + taskCategory := admin.TaskCategory{Name: taskTemplate.GetType(), Version: taskTemplate.GetTaskTypeVersion()} agent, isSync := p.getFinalAgent(&taskCategory, p.cfg) taskExecutionMetadata := buildTaskExecutionMetadata(taskCtx.TaskExecutionMetadata()) @@ -184,7 +185,7 @@ func (p *Plugin) ExecuteTaskSync( in, err := stream.Recv() if err != nil { - logger.Errorf(ctx, "failed to write output with err %s", err.Error()) + logger.Errorf(ctx, "failed to receive from server %s", err.Error()) return nil, nil, err } if in.GetHeader() == nil { @@ -195,11 +196,12 @@ func (p *Plugin) ExecuteTaskSync( resource := in.GetHeader().GetResource() return nil, ResourceWrapper{ - Phase: resource.Phase, - Outputs: resource.Outputs, - Message: resource.Message, - LogLinks: resource.LogLinks, - CustomInfo: resource.CustomInfo, + Phase: resource.GetPhase(), + Outputs: resource.GetOutputs(), + Message: resource.GetMessage(), + LogLinks: resource.GetLogLinks(), + CustomInfo: resource.GetCustomInfo(), + AgentError: resource.GetAgentError(), }, err } @@ -215,7 +217,7 @@ func (p *Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest web defer cancel() request := &admin.GetTaskRequest{ - TaskType: metadata.TaskCategory.Name, + TaskType: metadata.TaskCategory.GetName(), TaskCategory: &metadata.TaskCategory, ResourceMeta: metadata.AgentResourceMeta, } @@ -225,12 +227,12 @@ func (p *Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest web } return ResourceWrapper{ - Phase: res.Resource.Phase, - State: res.Resource.State, - Outputs: res.Resource.Outputs, - Message: res.Resource.Message, - 
LogLinks: res.Resource.LogLinks, - CustomInfo: res.Resource.CustomInfo, + Phase: res.GetResource().GetPhase(), + State: res.GetResource().GetState(), + Outputs: res.GetResource().GetOutputs(), + Message: res.GetResource().GetMessage(), + LogLinks: res.GetResource().GetLogLinks(), + CustomInfo: res.GetResource().GetCustomInfo(), }, nil } @@ -249,7 +251,7 @@ func (p *Plugin) Delete(ctx context.Context, taskCtx webapi.DeleteContext) error defer cancel() request := &admin.DeleteTaskRequest{ - TaskType: metadata.TaskCategory.Name, + TaskType: metadata.TaskCategory.GetName(), TaskCategory: &metadata.TaskCategory, ResourceMeta: metadata.AgentResourceMeta, } @@ -260,6 +262,10 @@ func (p *Plugin) Delete(ctx context.Context, taskCtx webapi.DeleteContext) error func (p *Plugin) Status(ctx context.Context, taskCtx webapi.StatusContext) (phase core.PhaseInfo, err error) { resource := taskCtx.Resource().(ResourceWrapper) taskInfo := &core.TaskInfo{Logs: resource.LogLinks, CustomInfo: resource.CustomInfo} + errorCode := pluginErrors.TaskFailedWithError + if resource.AgentError != nil && resource.AgentError.GetCode() != "" { + errorCode = resource.AgentError.GetCode() + } switch resource.Phase { case flyteIdl.TaskExecution_QUEUED: @@ -278,11 +284,10 @@ func (p *Plugin) Status(ctx context.Context, taskCtx webapi.StatusContext) (phas } return core.PhaseInfoSuccess(taskInfo), nil case flyteIdl.TaskExecution_ABORTED: - return core.PhaseInfoFailure(pluginErrors.TaskFailedWithError, "failed to run the job with aborted phase.\n"+resource.Message, taskInfo), nil + return core.PhaseInfoFailure(errorCode, "failed to run the job with aborted phase.", taskInfo), nil case flyteIdl.TaskExecution_FAILED: - return core.PhaseInfoFailure(pluginErrors.TaskFailedWithError, "failed to run the job.\n"+resource.Message, taskInfo), nil + return core.PhaseInfoFailure(errorCode, fmt.Sprintf("failed to run the job: %s", resource.Message), taskInfo), nil } - // The default phase is undefined. if resource.Phase != flyteIdl.TaskExecution_UNDEFINED { return core.PhaseInfoUndefined, pluginErrors.Errorf(core.SystemErrorCode, "unknown execution phase [%v].", resource.Phase) @@ -350,7 +355,7 @@ func (p *Plugin) getFinalAgent(taskCategory *admin.TaskCategory, cfg *Config) (* p.mu.RLock() defer p.mu.RUnlock() - if agent, exists := p.registry[taskCategory.Name][taskCategory.Version]; exists { + if agent, exists := p.registry[taskCategory.GetName()][taskCategory.GetVersion()]; exists { return agent.AgentDeployment, agent.IsSync } return &cfg.DefaultAgent, false @@ -362,7 +367,7 @@ func writeOutput(ctx context.Context, taskCtx webapi.StatusContext, outputs *fly return err } - if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil { + if taskTemplate.GetInterface() == nil || taskTemplate.GetInterface().GetOutputs() == nil || taskTemplate.Interface.Outputs.Variables == nil { logger.Debugf(ctx, "The task declares no outputs. 
Skipping writing the outputs.") return nil } @@ -388,7 +393,7 @@ func buildTaskExecutionMetadata(taskExecutionMetadata core.TaskExecutionMetadata Annotations: taskExecutionMetadata.GetAnnotations(), K8SServiceAccount: taskExecutionMetadata.GetK8sServiceAccount(), EnvironmentVariables: taskExecutionMetadata.GetEnvironmentVariables(), - Identity: taskExecutionMetadata.GetSecurityContext().RunAs, + Identity: taskExecutionMetadata.GetSecurityContext().RunAs, // nolint:protogetter } } diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go index 9e8c97903e..546ef59712 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go @@ -12,9 +12,9 @@ import ( agentMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" - flyteIdl "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" flyteIdlCore "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" + pluginErrors "github.com/flyteorg/flyte/flyteplugins/go/tasks/errors" pluginsCore "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" pluginCoreMocks "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core/mocks" webapiPlugin "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/webapi/mocks" @@ -180,7 +180,7 @@ func TestPlugin(t *testing.T) { t.Run("test TaskExecution_UNDEFINED Status", func(t *testing.T) { taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - Phase: flyteIdl.TaskExecution_UNDEFINED, + Phase: flyteIdlCore.TaskExecution_UNDEFINED, Outputs: nil, Message: "", LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, @@ -194,7 +194,7 @@ func TestPlugin(t *testing.T) { t.Run("test TaskExecution_QUEUED Status", func(t *testing.T) { taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - Phase: flyteIdl.TaskExecution_QUEUED, + Phase: flyteIdlCore.TaskExecution_QUEUED, Outputs: nil, Message: "", LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, @@ -208,7 +208,7 @@ func TestPlugin(t *testing.T) { t.Run("test TaskExecution_WAITING_FOR_RESOURCES Status", func(t *testing.T) { taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - Phase: flyteIdl.TaskExecution_WAITING_FOR_RESOURCES, + Phase: flyteIdlCore.TaskExecution_WAITING_FOR_RESOURCES, Outputs: nil, Message: "", LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, @@ -222,7 +222,7 @@ func TestPlugin(t *testing.T) { t.Run("test TaskExecution_INITIALIZING Status", func(t *testing.T) { taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - Phase: flyteIdl.TaskExecution_INITIALIZING, + Phase: flyteIdlCore.TaskExecution_INITIALIZING, Outputs: nil, Message: "", LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, @@ -236,7 +236,7 @@ func TestPlugin(t *testing.T) { t.Run("test TaskExecution_RUNNING Status", func(t *testing.T) { taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - Phase: flyteIdl.TaskExecution_RUNNING, + Phase: flyteIdlCore.TaskExecution_RUNNING, Outputs: nil, Message: "", LogLinks: 
[]*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, @@ -250,7 +250,7 @@ func TestPlugin(t *testing.T) { t.Run("test TaskExecution_ABORTED Status", func(t *testing.T) { taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - Phase: flyteIdl.TaskExecution_ABORTED, + Phase: flyteIdlCore.TaskExecution_ABORTED, Outputs: nil, Message: "", LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, @@ -264,7 +264,26 @@ func TestPlugin(t *testing.T) { t.Run("test TaskExecution_FAILED Status", func(t *testing.T) { taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - Phase: flyteIdl.TaskExecution_FAILED, + Phase: flyteIdlCore.TaskExecution_FAILED, + Outputs: nil, + Message: "boom", + LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, + AgentError: &admin.AgentError{ + Code: "ERROR: 500", + }, + }) + + phase, err := plugin.Status(context.Background(), taskContext) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhasePermanentFailure, phase.Phase()) + assert.Equal(t, "ERROR: 500", phase.Err().GetCode()) + assert.Equal(t, "failed to run the job: boom", phase.Err().GetMessage()) + }) + + t.Run("test TaskExecution_FAILED Status Without Agent Error", func(t *testing.T) { + taskContext := new(webapiPlugin.StatusContext) + taskContext.On("Resource").Return(ResourceWrapper{ + Phase: flyteIdlCore.TaskExecution_FAILED, Outputs: nil, Message: "", LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, @@ -273,6 +292,7 @@ func TestPlugin(t *testing.T) { phase, err := plugin.Status(context.Background(), taskContext) assert.NoError(t, err) assert.Equal(t, pluginsCore.PhasePermanentFailure, phase.Phase()) + assert.Equal(t, pluginErrors.TaskFailedWithError, phase.Err().GetCode()) }) t.Run("test UNDEFINED Phase", func(t *testing.T) { diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/utils.go b/flyteplugins/go/tasks/plugins/webapi/athena/utils.go index 761e81842a..1ed1fbaea4 100644 --- a/flyteplugins/go/tasks/plugins/webapi/athena/utils.go +++ b/flyteplugins/go/tasks/plugins/webapi/athena/utils.go @@ -19,12 +19,12 @@ func writeOutput(ctx context.Context, tCtx webapi.StatusContext, externalLocatio return err } - if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil { + if taskTemplate.GetInterface() == nil || taskTemplate.GetInterface().GetOutputs() == nil || taskTemplate.Interface.Outputs.Variables == nil { logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.") return nil } - resultsSchema, exists := taskTemplate.Interface.Outputs.Variables["results"] + resultsSchema, exists := taskTemplate.GetInterface().GetOutputs().GetVariables()["results"] if !exists { logger.Infof(ctx, "The task declares no outputs. 
Skipping writing the outputs.") return nil @@ -56,11 +56,11 @@ type QueryInfo struct { } func validateHiveQuery(hiveQuery pluginsIdl.QuboleHiveJob) error { - if hiveQuery.Query == nil { + if hiveQuery.GetQuery() == nil { return errors.Errorf(errors.BadTaskSpecification, "Query is a required field.") } - if len(hiveQuery.Query.Query) == 0 { + if len(hiveQuery.GetQuery().GetQuery()) == 0 { return errors.Errorf(errors.BadTaskSpecification, "Query statement is a required field.") } @@ -68,7 +68,7 @@ func validateHiveQuery(hiveQuery pluginsIdl.QuboleHiveJob) error { } func validatePrestoQuery(prestoQuery pluginsIdl.PrestoQuery) error { - if len(prestoQuery.Statement) == 0 { + if len(prestoQuery.GetStatement()) == 0 { return errors.Errorf(errors.BadTaskSpecification, "Statement is a required field.") } @@ -81,7 +81,7 @@ func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReade return QueryInfo{}, err } - switch task.Type { + switch task.GetType() { case "hive": custom := task.GetCustom() hiveQuery := pluginsIdl.QuboleHiveJob{} @@ -95,8 +95,8 @@ func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReade } outputs, err := template.Render(ctx, []string{ - hiveQuery.Query.Query, - hiveQuery.ClusterLabel, + hiveQuery.GetQuery().GetQuery(), + hiveQuery.GetClusterLabel(), }, template.Parameters{ TaskExecMetadata: tCtx.TaskExecutionMetadata(), Inputs: tCtx.InputReader(), @@ -124,10 +124,10 @@ func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReade } outputs, err := template.Render(ctx, []string{ - prestoQuery.RoutingGroup, - prestoQuery.Catalog, - prestoQuery.Schema, - prestoQuery.Statement, + prestoQuery.GetRoutingGroup(), + prestoQuery.GetCatalog(), + prestoQuery.GetSchema(), + prestoQuery.GetStatement(), }, template.Parameters{ TaskExecMetadata: tCtx.TaskExecutionMetadata(), Inputs: tCtx.InputReader(), @@ -146,5 +146,5 @@ func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReade }, nil } - return QueryInfo{}, errors.Errorf(ErrUser, "Unexpected task type [%v].", task.Type) + return QueryInfo{}, errors.Errorf(ErrUser, "Unexpected task type [%v].", task.GetType()) } diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go index ad7da5f042..fca1eee954 100644 --- a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go @@ -95,17 +95,17 @@ func (p Plugin) createImpl(ctx context.Context, taskCtx webapi.TaskExecutionCont return nil, nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, "unable to get bigquery client") } - if taskTemplate.Type == bigqueryQueryJobTask { + if taskTemplate.GetType() == bigqueryQueryJobTask { job, err = createQueryJob(jobID, taskTemplate.GetCustom(), inputs) } else { - err = pluginErrors.Errorf(pluginErrors.BadTaskSpecification, "unexpected task type [%v]", taskTemplate.Type) + err = pluginErrors.Errorf(pluginErrors.BadTaskSpecification, "unexpected task type [%v]", taskTemplate.GetType()) } if err != nil { return nil, nil, err } - job.Configuration.Query.Query = taskTemplate.GetSql().Statement + job.Configuration.Query.Query = taskTemplate.GetSql().GetStatement() job.Configuration.Labels = taskCtx.TaskExecutionMetadata().GetLabels() resp, err := client.Jobs.Insert(job.JobReference.ProjectId, job).Do() @@ -317,12 +317,12 @@ func writeOutput(ctx context.Context, tCtx webapi.StatusContext, OutputLocation return err } - if taskTemplate.Interface == nil || 
taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil { + if taskTemplate.GetInterface() == nil || taskTemplate.GetInterface().GetOutputs() == nil || taskTemplate.Interface.Outputs.Variables == nil { logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.") return nil } - resultsStructuredDatasetType, exists := taskTemplate.Interface.Outputs.Variables["results"] + resultsStructuredDatasetType, exists := taskTemplate.GetInterface().GetOutputs().GetVariables()["results"] if !exists { logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.") return nil diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go index 939fe0577a..8682350986 100644 --- a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go @@ -105,9 +105,9 @@ func TestOutputWriter(t *testing.T) { assert.NoError(t, err) sd := literals.GetLiterals()["results"].GetScalar().GetStructuredDataset() - assert.Equal(t, sd.Uri, outputLocation) - assert.Equal(t, sd.Metadata.GetStructuredDatasetType().Columns[0].Name, "col1") - assert.Equal(t, sd.Metadata.GetStructuredDatasetType().Columns[0].LiteralType.GetSimple(), flyteIdlCore.SimpleType_INTEGER) + assert.Equal(t, sd.GetUri(), outputLocation) + assert.Equal(t, sd.GetMetadata().GetStructuredDatasetType().GetColumns()[0].GetName(), "col1") + assert.Equal(t, sd.GetMetadata().GetStructuredDatasetType().GetColumns()[0].GetLiteralType().GetSimple(), flyteIdlCore.SimpleType_INTEGER) if ee != nil { assert.NoError(t, ds.WriteProtobuf(ctx, outputWriter.GetErrorPath(), storage.Options{}, ee)) @@ -307,9 +307,9 @@ func TestHandleErrorResult(t *testing.T) { phaseInfo := handleErrorResult(test.reason, "message", &taskInfo) assert.Equal(t, test.phase, phaseInfo.Phase()) - assert.Equal(t, test.reason, phaseInfo.Err().Code) - assert.Equal(t, test.errorKind, phaseInfo.Err().Kind) - assert.Equal(t, "message", phaseInfo.Err().Message) + assert.Equal(t, test.reason, phaseInfo.Err().GetCode()) + assert.Equal(t, test.errorKind, phaseInfo.Err().GetKind()) + assert.Equal(t, "message", phaseInfo.Err().GetMessage()) }) } } diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go index 7ce788e0fe..fe558f9d0c 100644 --- a/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go @@ -155,7 +155,7 @@ func unmarshalQueryJobConfig(structObj *structpb.Struct) (*QueryJobConfig, error } func getJobConfigurationQuery(custom *QueryJobConfig, inputs *flyteIdlCore.LiteralMap) (*bigquery.JobConfigurationQuery, error) { - queryParameters, err := getQueryParameters(inputs.Literals) + queryParameters, err := getQueryParameters(inputs.GetLiterals()) if err != nil { return nil, pluginErrors.Errorf(pluginErrors.BadTaskSpecification, "unable build query parameters [%v]", err.Error()) @@ -216,7 +216,7 @@ func getQueryParameters(literalMap map[string]*flyteIdlCore.Literal) ([]*bigquer func getQueryParameter(literal *flyteIdlCore.Literal) (*bigquery.QueryParameterType, *bigquery.QueryParameterValue, error) { if scalar := literal.GetScalar(); scalar != nil { if primitive := scalar.GetPrimitive(); primitive != nil { - switch primitive.Value.(type) { + switch primitive.GetValue().(type) { case *flyteIdlCore.Primitive_Integer: integerType := 
bigquery.QueryParameterType{Type: "INT64"} integerValue := bigquery.QueryParameterValue{ diff --git a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go index 6ae9a1dbe5..d889392c59 100644 --- a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go +++ b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go @@ -96,8 +96,8 @@ func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextR } // override the default token in propeller - if len(sparkJob.DatabricksToken) != 0 { - token = sparkJob.DatabricksToken + if len(sparkJob.GetDatabricksToken()) != 0 { + token = sparkJob.GetDatabricksToken() } modifiedArgs, err := template.Render(ctx, container.GetArgs(), template.Parameters{ TaskExecMetadata: taskCtx.TaskExecutionMetadata(), @@ -110,20 +110,20 @@ func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextR } databricksJob := make(map[string]interface{}) - err = utils.UnmarshalStructToObj(sparkJob.DatabricksConf, &databricksJob) + err = utils.UnmarshalStructToObj(sparkJob.GetDatabricksConf(), &databricksJob) if err != nil { - return nil, nil, fmt.Errorf("failed to unmarshal databricksJob: %v: %v", sparkJob.DatabricksConf, err) + return nil, nil, fmt.Errorf("failed to unmarshal databricksJob: %v: %v", sparkJob.GetDatabricksConf(), err) } // If "existing_cluster_id" is in databricks_job, then we don't need to set "new_cluster" // Refer the docs here: https://docs.databricks.com/en/workflows/jobs/jobs-2.0-api.html#request-structure if clusterConfig, ok := databricksJob[newCluster].(map[string]interface{}); ok { if dockerConfig, ok := clusterConfig[dockerImage].(map[string]interface{}); !ok || dockerConfig[url] == nil { - clusterConfig[dockerImage] = map[string]string{url: container.Image} + clusterConfig[dockerImage] = map[string]string{url: container.GetImage()} } - if clusterConfig[sparkConfig] == nil && len(sparkJob.SparkConf) != 0 { - clusterConfig[sparkConfig] = sparkJob.SparkConf + if clusterConfig[sparkConfig] == nil && len(sparkJob.GetSparkConf()) != 0 { + clusterConfig[sparkConfig] = sparkJob.GetSparkConf() } } databricksJob[sparkPythonTask] = map[string]interface{}{pythonFile: p.cfg.EntrypointFile, parameters: modifiedArgs} @@ -299,7 +299,7 @@ func writeOutput(ctx context.Context, taskCtx webapi.StatusContext) error { if err != nil { return err } - if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil { + if taskTemplate.GetInterface() == nil || taskTemplate.GetInterface().GetOutputs() == nil || taskTemplate.Interface.Outputs.Variables == nil { logger.Infof(ctx, "The task declares no outputs. 
Skipping writing the outputs.") return nil } diff --git a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go index 228914af93..3cdecf6872 100644 --- a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go +++ b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go @@ -148,7 +148,7 @@ func TestCreateTaskInfo(t *testing.T) { taskInfo := createTaskInfo("run-id", "job-id", testInstance) assert.Equal(t, 1, len(taskInfo.Logs)) - assert.Equal(t, taskInfo.Logs[0].Uri, "https://test-account.cloud.databricks.com/#job/job-id/run/run-id") - assert.Equal(t, taskInfo.Logs[0].Name, "Databricks Console") + assert.Equal(t, taskInfo.Logs[0].GetUri(), "https://test-account.cloud.databricks.com/#job/job-id/run/run-id") + assert.Equal(t, taskInfo.Logs[0].GetName(), "Databricks Console") }) } diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go index 02bf947fd4..c0728a79a7 100644 --- a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go +++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go @@ -84,7 +84,7 @@ func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextR config := task.GetConfig() outputs, err := template.Render(ctx, []string{ - task.GetSql().Statement, + task.GetSql().GetStatement(), }, template.Parameters{ TaskExecMetadata: taskCtx.TaskExecutionMetadata(), Inputs: taskCtx.InputReader(), diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go index 7657a9e315..3de8f8a6b8 100644 --- a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go +++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go @@ -57,8 +57,8 @@ func TestCreateTaskInfo(t *testing.T) { taskInfo := createTaskInfo("d5493e36", "test-account") assert.Equal(t, 1, len(taskInfo.Logs)) - assert.Equal(t, taskInfo.Logs[0].Uri, "https://test-account.snowflakecomputing.com/console#/monitoring/queries/detail?queryId=d5493e36") - assert.Equal(t, taskInfo.Logs[0].Name, "Snowflake Console") + assert.Equal(t, taskInfo.Logs[0].GetUri(), "https://test-account.snowflakecomputing.com/console#/monitoring/queries/detail?queryId=d5493e36") + assert.Equal(t, taskInfo.Logs[0].GetName(), "Snowflake Console") }) } diff --git a/flytepropeller/.golangci.yml b/flytepropeller/.golangci.yml index 6d13f4a3b6..77107079d0 100644 --- a/flytepropeller/.golangci.yml +++ b/flytepropeller/.golangci.yml @@ -1,35 +1,25 @@ -# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. 
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: -# -# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst - run: skip-dirs: - pkg/client - linters: disable-all: true enable: - - deadcode - errcheck - - gas + - gosec - gci - goconst - goimports - - golint - gosimple - govet - ineffassign - misspell - nakedret - staticcheck - - structcheck - typecheck - unconvert - unparam - unused - - varcheck - + - protogetter linters-settings: gci: custom-order: true @@ -38,6 +28,8 @@ linters-settings: - default - prefix(github.com/flyteorg) skip-generated: true + goconst: + ignore-tests: true issues: exclude: - copylocks diff --git a/flytepropeller/cmd/kubectl-flyte/cmd/compile.go b/flytepropeller/cmd/kubectl-flyte/cmd/compile.go index 056b546849..c91e10c183 100644 --- a/flytepropeller/cmd/kubectl-flyte/cmd/compile.go +++ b/flytepropeller/cmd/kubectl-flyte/cmd/compile.go @@ -76,18 +76,18 @@ func (c *CompileOpts) compileWorkflowCmd() error { if err != nil { return err } - err = ioutil.WriteFile(c.protoFile+".yaml", b, os.ModePerm) + err = os.WriteFile(c.protoFile+".yaml", b, os.ModePerm) // #nosec G306 if err != nil { return err } } - compiledTasks, err := compileTasks(wfClosure.Tasks) + compiledTasks, err := compileTasks(wfClosure.GetTasks()) if err != nil { return err } - compileWfClosure, err := compiler.CompileWorkflow(wfClosure.Workflow, []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{}) + compileWfClosure, err := compiler.CompileWorkflow(wfClosure.GetWorkflow(), []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{}) if err != nil { return err } @@ -100,7 +100,7 @@ func (c *CompileOpts) compileWorkflowCmd() error { } if c.outputPath != "" { - return ioutil.WriteFile(c.outputPath, o, os.ModePerm) + return os.WriteFile(c.outputPath, o, os.ModePerm) // #nosec G306 } fmt.Printf("%v", string(o)) return nil diff --git a/flytepropeller/cmd/kubectl-flyte/cmd/create.go b/flytepropeller/cmd/kubectl-flyte/cmd/create.go index 2feeb8ec8e..3cf463b604 100644 --- a/flytepropeller/cmd/kubectl-flyte/cmd/create.go +++ b/flytepropeller/cmd/kubectl-flyte/cmd/create.go @@ -160,12 +160,12 @@ func (c *CreateOpts) createWorkflowFromProto() error { return err } - compiledTasks, err := compileTasks(wfClosure.Tasks) + compiledTasks, err := compileTasks(wfClosure.GetTasks()) if err != nil { return err } - wf, err := compiler.CompileWorkflow(wfClosure.Workflow, []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{}) + wf, err := compiler.CompileWorkflow(wfClosure.GetWorkflow(), []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{}) if err != nil { return err } @@ -182,8 +182,8 @@ func (c *CreateOpts) createWorkflowFromProto() error { if len(c.execID) > 0 { executionID = &core.WorkflowExecutionIdentifier{ Name: c.execID, - Domain: wfClosure.Workflow.Id.Domain, - Project: wfClosure.Workflow.Id.Project, + Domain: wfClosure.GetWorkflow().GetId().GetDomain(), + Project: wfClosure.GetWorkflow().GetId().GetProject(), } } diff --git a/flytepropeller/cmd/kubectl-flyte/cmd/create_test.go b/flytepropeller/cmd/kubectl-flyte/cmd/create_test.go index 5036201482..65bb2ecae1 100644 --- a/flytepropeller/cmd/kubectl-flyte/cmd/create_test.go +++ b/flytepropeller/cmd/kubectl-flyte/cmd/create_test.go @@ -3,7 +3,6 @@ package cmd import ( "encoding/json" "flag" - "io/ioutil" "os" "path/filepath" "testing" @@ -113,7 +112,7 @@ func generateSimpleWorkflow(t *testing.T) { marshaller := &jsonpb.Marshaler{} s, err := 
marshaller.MarshalToString(&closure) assert.NoError(t, err) - assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", "workflow.json.golden"), []byte(s), os.ModePerm)) + assert.NoError(t, os.WriteFile(filepath.Join("testdata", "workflow.json.golden"), []byte(s), os.ModePerm)) // #nosec G306 m := map[string]interface{}{} err = json.Unmarshal([]byte(s), &m) @@ -121,11 +120,11 @@ func generateSimpleWorkflow(t *testing.T) { b, err := yaml.Marshal(m) assert.NoError(t, err) - assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", "workflow.yaml.golden"), b, os.ModePerm)) + assert.NoError(t, os.WriteFile(filepath.Join("testdata", "workflow.yaml.golden"), b, os.ModePerm)) // #nosec G306 raw, err := proto.Marshal(&closure) assert.NoError(t, err) - assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", "workflow.pb.golden"), raw, os.ModePerm)) + assert.NoError(t, os.WriteFile(filepath.Join("testdata", "workflow.pb.golden"), raw, os.ModePerm)) // #nosec G306 } func generateWorkflowWithInputs(t *testing.T) { @@ -242,7 +241,7 @@ func marshalGolden(t *testing.T, message proto.Message, filename string) { marshaller := &jsonpb.Marshaler{} s, err := marshaller.MarshalToString(message) assert.NoError(t, err) - assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", filename+".json.golden"), []byte(s), os.ModePerm)) + assert.NoError(t, os.WriteFile(filepath.Join("testdata", filename+".json.golden"), []byte(s), os.ModePerm)) // #nosec G306 m := map[string]interface{}{} err = json.Unmarshal([]byte(s), &m) @@ -250,28 +249,28 @@ func marshalGolden(t *testing.T, message proto.Message, filename string) { b, err := yaml.Marshal(m) assert.NoError(t, err) - assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", filename+".yaml.golden"), b, os.ModePerm)) + assert.NoError(t, os.WriteFile(filepath.Join("testdata", filename+".yaml.golden"), b, os.ModePerm)) // #nosec G306 raw, err := proto.Marshal(message) assert.NoError(t, err) - assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", filename+".pb.golden"), raw, os.ModePerm)) + assert.NoError(t, os.WriteFile(filepath.Join("testdata", filename+".pb.golden"), raw, os.ModePerm)) // #nosec G306 } func testCompile(t *testing.T) { f := func(t *testing.T, filePath, format string) { - raw, err := ioutil.ReadFile(filepath.Join("testdata", filePath)) + raw, err := os.ReadFile(filepath.Join("testdata", filePath)) assert.NoError(t, err) wf := &core.WorkflowClosure{} err = unmarshal(raw, format, wf) assert.NoError(t, err) assert.NotNil(t, wf) - assert.Equal(t, 2, len(wf.Tasks)) - if len(wf.Tasks) == 2 { - c := wf.Tasks[0].GetContainer() + assert.Equal(t, 2, len(wf.GetTasks())) + if len(wf.GetTasks()) == 2 { + c := wf.GetTasks()[0].GetContainer() assert.NotNil(t, c) - compiledTasks, err := compileTasks(wf.Tasks) + compiledTasks, err := compileTasks(wf.GetTasks()) assert.NoError(t, err) - compiledWf, err := compiler.CompileWorkflow(wf.Workflow, []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{}) + compiledWf, err := compiler.CompileWorkflow(wf.GetWorkflow(), []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{}) assert.NoError(t, err) _, err = k8s.BuildFlyteWorkflow(compiledWf, nil, nil, "") assert.NoError(t, err) diff --git a/flytepropeller/events/admin_eventsink.go b/flytepropeller/events/admin_eventsink.go index 3da6cca421..cc9c57661c 100644 --- a/flytepropeller/events/admin_eventsink.go +++ b/flytepropeller/events/admin_eventsink.go @@ -116,17 +116,17 @@ func IDFromMessage(message proto.Message) ([]byte, 
error) { var id string switch eventMessage := message.(type) { case *event.WorkflowExecutionEvent: - wid := eventMessage.ExecutionId - id = fmt.Sprintf("%s:%s:%s:%d", wid.Project, wid.Domain, wid.Name, eventMessage.Phase) + wid := eventMessage.GetExecutionId() + id = fmt.Sprintf("%s:%s:%s:%d", wid.GetProject(), wid.GetDomain(), wid.GetName(), eventMessage.GetPhase()) case *event.NodeExecutionEvent: - nid := eventMessage.Id - wid := nid.ExecutionId - id = fmt.Sprintf("%s:%s:%s:%s:%s:%d", wid.Project, wid.Domain, wid.Name, nid.NodeId, eventMessage.RetryGroup, eventMessage.Phase) + nid := eventMessage.GetId() + wid := nid.GetExecutionId() + id = fmt.Sprintf("%s:%s:%s:%s:%s:%d", wid.GetProject(), wid.GetDomain(), wid.GetName(), nid.GetNodeId(), eventMessage.GetRetryGroup(), eventMessage.GetPhase()) case *event.TaskExecutionEvent: - tid := eventMessage.TaskId - nid := eventMessage.ParentNodeExecutionId - wid := nid.ExecutionId - id = fmt.Sprintf("%s:%s:%s:%s:%s:%s:%d:%d:%d", wid.Project, wid.Domain, wid.Name, nid.NodeId, tid.Name, tid.Version, eventMessage.RetryAttempt, eventMessage.Phase, eventMessage.PhaseVersion) + tid := eventMessage.GetTaskId() + nid := eventMessage.GetParentNodeExecutionId() + wid := nid.GetExecutionId() + id = fmt.Sprintf("%s:%s:%s:%s:%s:%s:%d:%d:%d", wid.GetProject(), wid.GetDomain(), wid.GetName(), nid.GetNodeId(), tid.GetName(), tid.GetVersion(), eventMessage.GetRetryAttempt(), eventMessage.GetPhase(), eventMessage.GetPhaseVersion()) default: return nil, fmt.Errorf("unknown event type [%s]", eventMessage.String()) } @@ -140,7 +140,7 @@ func initializeAdminClientFromConfig(ctx context.Context, config *Config) (clien grpcOptions := []grpcRetry.CallOption{ grpcRetry.WithBackoff(grpcRetry.BackoffExponentialWithJitter(time.Duration(config.BackoffScalar)*time.Millisecond, config.GetBackoffJitter(ctx))), - grpcRetry.WithMax(uint(config.MaxRetries)), + grpcRetry.WithMax(uint(config.MaxRetries)), // #nosec G115 } opt := grpc.WithChainUnaryInterceptor( diff --git a/flytepropeller/events/admin_eventsink_test.go b/flytepropeller/events/admin_eventsink_test.go index 510371d056..e3a0d57dba 100644 --- a/flytepropeller/events/admin_eventsink_test.go +++ b/flytepropeller/events/admin_eventsink_test.go @@ -86,7 +86,7 @@ func TestAdminWorkflowEvent(t *testing.T) { "CreateWorkflowEvent", ctx, mock.MatchedBy(func(req *admin.WorkflowExecutionEventRequest) bool { - return req.Event == wfEvent + return req.GetEvent() == wfEvent }, )).Return(&admin.WorkflowExecutionEventResponse{}, nil) @@ -104,7 +104,7 @@ func TestAdminNodeEvent(t *testing.T) { "CreateNodeEvent", ctx, mock.MatchedBy(func(req *admin.NodeExecutionEventRequest) bool { - return req.Event == nodeEvent + return req.GetEvent() == nodeEvent }), ).Return(&admin.NodeExecutionEventResponse{}, nil) @@ -122,7 +122,7 @@ func TestAdminTaskEvent(t *testing.T) { "CreateTaskEvent", ctx, mock.MatchedBy(func(req *admin.TaskExecutionEventRequest) bool { - return req.Event == taskEvent + return req.GetEvent() == taskEvent }), ).Return(&admin.TaskExecutionEventResponse{}, nil) @@ -159,7 +159,7 @@ func TestAdminRateLimitError(t *testing.T) { "CreateTaskEvent", ctx, mock.MatchedBy(func(req *admin.TaskExecutionEventRequest) bool { - return req.Event == taskEvent + return req.GetEvent() == taskEvent }), ).Return(&admin.TaskExecutionEventResponse{}, nil) diff --git a/flytepropeller/events/errors/errors.go b/flytepropeller/events/errors/errors.go index 2d3e02e0df..11c603bad8 100644 --- a/flytepropeller/events/errors/errors.go +++ 
b/flytepropeller/events/errors/errors.go @@ -74,7 +74,7 @@ func WrapError(err error) error { phase := reason.AlreadyInTerminalState.GetCurrentPhase() return wrapf(EventAlreadyInTerminalStateError, err, fmt.Sprintf("conflicting events; destination: %v", phase)) case *admin.EventFailureReason_IncompatibleCluster: - return wrapf(EventIncompatibleCusterError, err, fmt.Sprintf("conflicting execution cluster; expected: %v", reason.IncompatibleCluster.Cluster)) + return wrapf(EventIncompatibleCusterError, err, fmt.Sprintf("conflicting execution cluster; expected: %v", reason.IncompatibleCluster.GetCluster())) default: logger.Warnf(context.Background(), "found unexpected type in details of grpc status: %v", reason) } diff --git a/flytepropeller/events/event_recorder.go b/flytepropeller/events/event_recorder.go index 310797f081..9390e04bf4 100644 --- a/flytepropeller/events/event_recorder.go +++ b/flytepropeller/events/event_recorder.go @@ -86,8 +86,8 @@ func (r *eventRecorder) RecordWorkflowEvent(ctx context.Context, e *event.Workfl // If error message too large, truncate to mitigate grpc message size limit. Split the truncated size equally between // the beginning and the end of the message to capture the most relevant information. func truncateErrorMessage(err *core.ExecutionError, length int) { - if len(err.Message) > length { - err.Message = fmt.Sprintf("%s\n%s\n%s", err.Message[:length/2], truncationIndicator, err.Message[(len(err.Message)-length/2):]) + if len(err.GetMessage()) > length { + err.Message = fmt.Sprintf("%s\n%s\n%s", err.GetMessage()[:length/2], truncationIndicator, err.GetMessage()[(len(err.GetMessage())-length/2):]) } } diff --git a/flytepropeller/events/event_recorder_test.go b/flytepropeller/events/event_recorder_test.go index 2b633b72ff..32c1193361 100644 --- a/flytepropeller/events/event_recorder_test.go +++ b/flytepropeller/events/event_recorder_test.go @@ -96,6 +96,6 @@ func TestTruncateErrorMessage(t *testing.T) { } truncateErrorMessage(&executionError, length) - assert.True(t, len(executionError.Message) <= length+len(truncationIndicator)+2) + assert.True(t, len(executionError.GetMessage()) <= length+len(truncationIndicator)+2) } } diff --git a/flytepropeller/events/eventsink_test.go b/flytepropeller/events/eventsink_test.go index 46aa5e46db..d488398d7b 100644 --- a/flytepropeller/events/eventsink_test.go +++ b/flytepropeller/events/eventsink_test.go @@ -62,11 +62,11 @@ func TestFileEvent(t *testing.T) { taskEvent := &event.TaskExecutionEvent{ TaskId: &core.Identifier{ ResourceType: core.ResourceType_TASK, - Project: executionID.Project, - Domain: executionID.Domain, - Name: executionID.Name, + Project: executionID.GetProject(), + Domain: executionID.GetDomain(), + Name: executionID.GetName(), }, - ParentNodeExecutionId: nodeEvent.Id, + ParentNodeExecutionId: nodeEvent.GetId(), Phase: core.TaskExecution_FAILED, OccurredAt: now, } diff --git a/flytepropeller/events/local_eventsink.go b/flytepropeller/events/local_eventsink.go index fdcd5408a4..2300942bab 100644 --- a/flytepropeller/events/local_eventsink.go +++ b/flytepropeller/events/local_eventsink.go @@ -26,13 +26,13 @@ func (s *localSink) Sink(ctx context.Context, message proto.Message) error { switch e := message.(type) { case *event.WorkflowExecutionEvent: eventOutput = fmt.Sprintf("[--WF EVENT--] %s, Phase: %s, OccuredAt: %s\n", - e.ExecutionId, e.Phase, e.OccurredAt.AsTime().String()) + e.GetExecutionId(), e.GetPhase(), e.GetOccurredAt().AsTime().String()) case *event.NodeExecutionEvent: eventOutput = 
fmt.Sprintf("[--NODE EVENT--] %s, Phase: %s, OccuredAt: %s\n", - e.Id, e.Phase, e.OccurredAt.AsTime().String()) + e.GetId(), e.GetPhase(), e.GetOccurredAt().AsTime().String()) case *event.TaskExecutionEvent: eventOutput = fmt.Sprintf("[--TASK EVENT--] %s,%s, Phase: %s, OccuredAt: %s\n", - e.TaskId, e.ParentNodeExecutionId, e.Phase, e.OccurredAt.AsTime().String()) + e.GetTaskId(), e.GetParentNodeExecutionId(), e.GetPhase(), e.GetOccurredAt().AsTime().String()) } return s.writer.Write(ctx, eventOutput) diff --git a/flytepropeller/events/node_event_recorder.go b/flytepropeller/events/node_event_recorder.go index 8beb488ce6..c283401614 100644 --- a/flytepropeller/events/node_event_recorder.go +++ b/flytepropeller/events/node_event_recorder.go @@ -69,7 +69,7 @@ func (r *nodeEventRecorder) RecordNodeEvent(ctx context.Context, ev *event.NodeE logger.Infof(ctx, "Failed to record node event [%+v] with err: %v", ev, err) // Only attempt to retry sending an event in the case we tried to send raw output data inline if eventConfig.FallbackToOutputReference && rawOutputPolicy == config.RawOutputPolicyInline { - logger.Infof(ctx, "Falling back to sending node event outputs by reference for [%+v]", ev.Id) + logger.Infof(ctx, "Falling back to sending node event outputs by reference for [%+v]", ev.GetId()) return r.handleFailure(ctx, origEvent, err) } return err diff --git a/flytepropeller/events/node_event_recorder_test.go b/flytepropeller/events/node_event_recorder_test.go index 5d2025b525..d3321d649e 100644 --- a/flytepropeller/events/node_event_recorder_test.go +++ b/flytepropeller/events/node_event_recorder_test.go @@ -82,7 +82,7 @@ func TestRecordNodeEvent_Success_InlineOutputs(t *testing.T) { store: mockStore, } err := recorder.RecordNodeEvent(ctx, getReferenceNodeEv(), inlineEventConfig) - assert.Equal(t, deckURI, nodeEvent.DeckUri) + assert.Equal(t, deckURI, nodeEvent.GetDeckUri()) assert.NoError(t, err) } diff --git a/flytepropeller/events/task_event_recorder.go b/flytepropeller/events/task_event_recorder.go index 8b531ae85f..3882802dd4 100644 --- a/flytepropeller/events/task_event_recorder.go +++ b/flytepropeller/events/task_event_recorder.go @@ -69,7 +69,7 @@ func (r *taskEventRecorder) RecordTaskEvent(ctx context.Context, ev *event.TaskE logger.Infof(ctx, "Failed to record task event [%+v] with err: %v", ev, err) // Only attempt to retry sending an event in the case we tried to send raw output data inline if eventConfig.FallbackToOutputReference && rawOutputPolicy == config.RawOutputPolicyInline { - logger.Infof(ctx, "Falling back to sending task event outputs by reference for [%+v]", ev.TaskId) + logger.Infof(ctx, "Falling back to sending task event outputs by reference for [%+v]", ev.GetTaskId()) return r.handleFailure(ctx, origEvent, err) } return err diff --git a/flytepropeller/events/workflow_event_recorder.go b/flytepropeller/events/workflow_event_recorder.go index f0f48a7f9d..5e56799925 100644 --- a/flytepropeller/events/workflow_event_recorder.go +++ b/flytepropeller/events/workflow_event_recorder.go @@ -69,7 +69,7 @@ func (r *workflowEventRecorder) RecordWorkflowEvent(ctx context.Context, ev *eve logger.Infof(ctx, "Failed to record workflow event [%+v] with err: %v", ev, err) // Only attempt to retry sending an event in the case we tried to send raw output data inline if eventConfig.FallbackToOutputReference && rawOutputPolicy == config.RawOutputPolicyInline { - logger.Infof(ctx, "Falling back to sending workflow event outputs by reference for [%+v]", ev.ExecutionId) + 
logger.Infof(ctx, "Falling back to sending workflow event outputs by reference for [%+v]", ev.GetExecutionId()) return r.handleFailure(ctx, origEvent, err) } return err diff --git a/flytepropeller/go.mod b/flytepropeller/go.mod index c819278d90..c129312e44 100644 --- a/flytepropeller/go.mod +++ b/flytepropeller/go.mod @@ -22,11 +22,14 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.1 + github.com/santhosh-tekuri/jsonschema v1.2.4 github.com/shamaton/msgpack/v2 v2.2.2 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 + github.com/wI2L/jsondiff v0.6.0 + gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/trace v1.24.0 @@ -124,6 +127,10 @@ require ( github.com/spf13/viper v1.11.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.2.0 // indirect + github.com/tidwall/gjson v1.17.1 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect diff --git a/flytepropeller/go.sum b/flytepropeller/go.sum index 37a8766913..63c498dc77 100644 --- a/flytepropeller/go.sum +++ b/flytepropeller/go.sum @@ -373,6 +373,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/shamaton/msgpack/v2 v2.2.2 h1:GOIg0c9LV04VwzOOqZSrmsv/JzjNOOMxnS/HvOHGdgs= github.com/shamaton/msgpack/v2 v2.2.2/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -410,11 +412,25 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 
h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/wI2L/jsondiff v0.6.0 h1:zrsH3FbfVa3JO9llxrcDy/XLkYPLgoMX6Mz3T2PP2AI= +github.com/wI2L/jsondiff v0.6.0/go.mod h1:D6aQ5gKgPF9g17j+E9N7aasmU1O+XvfmWm1y8UMmNpw= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7 h1:BAkxmYRc1ZPl6Gap4HWqwPT8yLZMrgaAwx12Ft408sg= +gitlab.com/yvesf/json-schema-compare v0.0.0-20190604192943-a900c04201f7/go.mod h1:X40Z1OU8o1oiXWzBmkuYOaruzYGv60l0AxGiB0E9keI= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/branch_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/branch_test.go index 5fd2a14218..b10c704409 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/branch_test.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/branch_test.go @@ -17,7 +17,7 @@ func TestMarshalUnMarshal_BranchTask(t *testing.T) { err = json.Unmarshal(r, &o) assert.NoError(t, err) assert.NotNil(t, o.BranchNode.If) - assert.Equal(t, core.ComparisonExpression_GT, o.BranchNode.If.Condition.BooleanExpression.GetComparison().Operator) + assert.Equal(t, core.ComparisonExpression_GT, o.GetBranchNode().GetIf().GetCondition().GetComparison().GetOperator()) assert.Equal(t, 1, len(o.InputBindings)) raw, err := json.Marshal(o) if assert.NoError(t, err) { diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/error_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/error_test.go index 4e0968205d..d709fa6803 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/error_test.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/error_test.go @@ -1,7 +1,6 @@ package v1alpha1 import ( - "encoding/json" "testing" "github.com/stretchr/testify/assert" @@ -10,21 +9,28 @@ import ( ) func TestExecutionErrorJSONMarshalling(t *testing.T) { - execError := &core.ExecutionError{ - Code: "TestCode", - Message: "Test error message", - ErrorUri: "Test error uri", + execError := ExecutionError{ + &core.ExecutionError{ + Code: "TestCode", + Message: "Test error message", + ErrorUri: "Test error uri", + }, } - execErr := &ExecutionError{ExecutionError: execError} - data, jErr := json.Marshal(execErr) - assert.Nil(t, jErr) + expected, mockErr := mockMarshalPbToBytes(execError.ExecutionError) + assert.Nil(t, mockErr) - newExecErr := &ExecutionError{} - uErr := json.Unmarshal(data, newExecErr) + // MarshalJSON + execErrorBytes, mErr := execError.MarshalJSON() + assert.Nil(t, mErr) + assert.Equal(t, expected, execErrorBytes) + + // UnmarshalJSON + execErrorObj := &ExecutionError{} + uErr := execErrorObj.UnmarshalJSON(execErrorBytes) assert.Nil(t, uErr) - assert.Equal(t, execError.Code, newExecErr.ExecutionError.Code) - assert.Equal(t, execError.Message, newExecErr.ExecutionError.Message) - assert.Equal(t, execError.ErrorUri, 
newExecErr.ExecutionError.ErrorUri) + assert.Equal(t, execError.Code, execErrorObj.Code) + assert.Equal(t, execError.Message, execError.Message) + assert.Equal(t, execError.ErrorUri, execErrorObj.ErrorUri) } diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/gate_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/gate_test.go new file mode 100644 index 0000000000..2fc532d4e4 --- /dev/null +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/gate_test.go @@ -0,0 +1,150 @@ +package v1alpha1 + +import ( + "bytes" + "testing" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/durationpb" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func mockMarshalPbToBytes(msg proto.Message) ([]byte, error) { + var buf bytes.Buffer + jMarshaller := jsonpb.Marshaler{} + if err := jMarshaller.Marshal(&buf, msg); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func TestApproveConditionJSONMarshalling(t *testing.T) { + approveCondition := ApproveCondition{ + &core.ApproveCondition{ + SignalId: "TestSignalId", + }, + } + + expected, mockErr := mockMarshalPbToBytes(approveCondition.ApproveCondition) + assert.Nil(t, mockErr) + + // MarshalJSON + approveConditionBytes, mErr := approveCondition.MarshalJSON() + assert.Nil(t, mErr) + assert.Equal(t, expected, approveConditionBytes) + + // UnmarshalJSON + approveConditionObj := &ApproveCondition{} + uErr := approveConditionObj.UnmarshalJSON(approveConditionBytes) + assert.Nil(t, uErr) + assert.Equal(t, approveCondition.SignalId, approveConditionObj.SignalId) +} + +func TestSignalConditionJSONMarshalling(t *testing.T) { + signalCondition := SignalCondition{ + &core.SignalCondition{ + SignalId: "TestSignalId", + }, + } + + expected, mockErr := mockMarshalPbToBytes(signalCondition.SignalCondition) + assert.Nil(t, mockErr) + + // MarshalJSON + signalConditionBytes, mErr := signalCondition.MarshalJSON() + assert.Nil(t, mErr) + assert.Equal(t, expected, signalConditionBytes) + + // UnmarshalJSON + signalConditionObj := &SignalCondition{} + uErr := signalConditionObj.UnmarshalJSON(signalConditionBytes) + assert.Nil(t, uErr) + assert.Equal(t, signalCondition.SignalId, signalConditionObj.SignalId) +} + +func TestSleepConditionJSONMarshalling(t *testing.T) { + sleepCondition := SleepCondition{ + &core.SleepCondition{ + Duration: &durationpb.Duration{ + Seconds: 10, + Nanos: 10, + }, + }, + } + + expected, mockErr := mockMarshalPbToBytes(sleepCondition.SleepCondition) + assert.Nil(t, mockErr) + + // MarshalJSON + sleepConditionBytes, mErr := sleepCondition.MarshalJSON() + assert.Nil(t, mErr) + assert.Equal(t, expected, sleepConditionBytes) + + // UnmarshalJSON + sleepConditionObj := &SleepCondition{} + uErr := sleepConditionObj.UnmarshalJSON(sleepConditionBytes) + assert.Nil(t, uErr) + assert.Equal(t, sleepCondition.Duration, sleepConditionObj.Duration) +} + +func TestGateNodeSpec_GetKind(t *testing.T) { + kind := ConditionKindApprove + gateNodeSpec := GateNodeSpec{ + Kind: kind, + } + + if gateNodeSpec.GetKind() != kind { + t.Errorf("Expected %s, but got %s", kind, gateNodeSpec.GetKind()) + } +} + +func TestGateNodeSpec_GetApprove(t *testing.T) { + approveCondition := &ApproveCondition{ + &core.ApproveCondition{ + SignalId: "TestSignalId", + }, + } + gateNodeSpec := GateNodeSpec{ + Approve: approveCondition, + } + + if gateNodeSpec.GetApprove() != approveCondition.ApproveCondition { + t.Errorf("Expected 
approveCondition, but got a different value") + } +} + +func TestGateNodeSpec_GetSignal(t *testing.T) { + signalCondition := &SignalCondition{ + &core.SignalCondition{ + SignalId: "TestSignalId", + }, + } + gateNodeSpec := GateNodeSpec{ + Signal: signalCondition, + } + + if gateNodeSpec.GetSignal() != signalCondition.SignalCondition { + t.Errorf("Expected signalCondition, but got a different value") + } +} + +func TestGateNodeSpec_GetSleep(t *testing.T) { + sleepCondition := &SleepCondition{ + &core.SleepCondition{ + Duration: &durationpb.Duration{ + Seconds: 10, + Nanos: 10, + }, + }, + } + gateNodeSpec := GateNodeSpec{ + Sleep: sleepCondition, + } + + if gateNodeSpec.GetSleep() != sleepCondition.SleepCondition { + t.Errorf("Expected sleepCondition, but got a different value") + } +} diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/identifier_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/identifier_test.go new file mode 100644 index 0000000000..b7bafaacb3 --- /dev/null +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/identifier_test.go @@ -0,0 +1,115 @@ +package v1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func TestIdentifierJSONMarshalling(t *testing.T) { + identifier := Identifier{ + &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "TestProject", + Domain: "TestDomain", + Name: "TestName", + Version: "TestVersion", + }, + } + + expected, mockErr := mockMarshalPbToBytes(identifier.Identifier) + assert.Nil(t, mockErr) + + // MarshalJSON + identifierBytes, mErr := identifier.MarshalJSON() + assert.Nil(t, mErr) + assert.Equal(t, expected, identifierBytes) + + // UnmarshalJSON + identifierObj := &Identifier{} + uErr := identifierObj.UnmarshalJSON(identifierBytes) + assert.Nil(t, uErr) + assert.Equal(t, identifier.Project, identifierObj.Project) + assert.Equal(t, identifier.Domain, identifierObj.Domain) + assert.Equal(t, identifier.Name, identifierObj.Name) + assert.Equal(t, identifier.Version, identifierObj.Version) +} + +func TestIdentifier_DeepCopyInto(t *testing.T) { + identifier := Identifier{ + &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "TestProject", + Domain: "TestDomain", + Name: "TestName", + Version: "TestVersion", + }, + } + + identifierCopy := Identifier{} + identifier.DeepCopyInto(&identifierCopy) + assert.Equal(t, identifier.Project, identifierCopy.Project) + assert.Equal(t, identifier.Domain, identifierCopy.Domain) + assert.Equal(t, identifier.Name, identifierCopy.Name) + assert.Equal(t, identifier.Version, identifierCopy.Version) +} + +func TestWorkflowExecutionIdentifier_DeepCopyInto(t *testing.T) { + weIdentifier := WorkflowExecutionIdentifier{ + &core.WorkflowExecutionIdentifier{ + Project: "TestProject", + Domain: "TestDomain", + Name: "TestName", + Org: "TestOrg", + }, + } + + weIdentifierCopy := WorkflowExecutionIdentifier{} + weIdentifier.DeepCopyInto(&weIdentifierCopy) + assert.Equal(t, weIdentifier.Project, weIdentifierCopy.Project) + assert.Equal(t, weIdentifier.Domain, weIdentifierCopy.Domain) + assert.Equal(t, weIdentifier.Name, weIdentifierCopy.Name) + assert.Equal(t, weIdentifier.Org, weIdentifierCopy.Org) +} + +func TestTaskExecutionIdentifier_DeepCopyInto(t *testing.T) { + teIdentifier := TaskExecutionIdentifier{ + &core.TaskExecutionIdentifier{ + TaskId: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "TestProject", + Domain: "TestDomain", + Name: "TestName", + Version: 
"TestVersion", + Org: "TestOrg", + }, + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "TestProject", + Domain: "TestDomain", + Name: "TestName", + Org: "TestOrg", + }, + NodeId: "TestNodeId", + }, + RetryAttempt: 1, + }, + } + + teIdentifierCopy := TaskExecutionIdentifier{} + teIdentifier.DeepCopyInto(&teIdentifierCopy) + assert.Equal(t, teIdentifier.TaskId.GetResourceType(), teIdentifierCopy.TaskId.GetResourceType()) + assert.Equal(t, teIdentifier.TaskId.GetProject(), teIdentifierCopy.TaskId.GetProject()) + assert.Equal(t, teIdentifier.TaskId.GetDomain(), teIdentifierCopy.TaskId.GetDomain()) + assert.Equal(t, teIdentifier.TaskId.GetName(), teIdentifierCopy.TaskId.GetName()) + assert.Equal(t, teIdentifier.TaskId.GetVersion(), teIdentifierCopy.TaskId.GetVersion()) + assert.Equal(t, teIdentifier.TaskId.GetOrg(), teIdentifierCopy.TaskId.GetOrg()) + assert.Equal(t, teIdentifier.NodeExecutionId.GetExecutionId().GetProject(), teIdentifierCopy.NodeExecutionId.GetExecutionId().GetProject()) + assert.Equal(t, teIdentifier.NodeExecutionId.GetExecutionId().GetDomain(), teIdentifierCopy.NodeExecutionId.GetExecutionId().GetDomain()) + assert.Equal(t, teIdentifier.NodeExecutionId.GetExecutionId().GetName(), teIdentifierCopy.NodeExecutionId.GetExecutionId().GetName()) + assert.Equal(t, teIdentifier.NodeExecutionId.GetExecutionId().GetOrg(), teIdentifierCopy.NodeExecutionId.GetExecutionId().GetOrg()) + assert.Equal(t, teIdentifier.NodeExecutionId.GetNodeId(), teIdentifierCopy.NodeExecutionId.GetNodeId()) + assert.Equal(t, teIdentifier.RetryAttempt, teIdentifierCopy.RetryAttempt) +} diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/iface.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/iface.go index 486ac35a16..c2022dea25 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/iface.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/iface.go @@ -290,6 +290,7 @@ type ExecutableArrayNodeStatus interface { GetSubNodeTaskPhases() bitarray.CompactArray GetSubNodeRetryAttempts() bitarray.CompactArray GetSubNodeSystemFailures() bitarray.CompactArray + GetSubNodeDeltaTimestamps() bitarray.CompactArray GetTaskPhaseVersion() uint32 } @@ -302,6 +303,7 @@ type MutableArrayNodeStatus interface { SetSubNodeTaskPhases(subNodeTaskPhases bitarray.CompactArray) SetSubNodeRetryAttempts(subNodeRetryAttempts bitarray.CompactArray) SetSubNodeSystemFailures(subNodeSystemFailures bitarray.CompactArray) + SetSubNodeDeltaTimestamps(subNodeDeltaTimestamps bitarray.CompactArray) SetTaskPhaseVersion(taskPhaseVersion uint32) } diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/ExecutableArrayNodeStatus.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/ExecutableArrayNodeStatus.go index f4cce3e643..4aee51f044 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/ExecutableArrayNodeStatus.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/ExecutableArrayNodeStatus.go @@ -82,6 +82,38 @@ func (_m *ExecutableArrayNodeStatus) GetExecutionError() *core.ExecutionError { return r0 } +type ExecutableArrayNodeStatus_GetSubNodeDeltaTimestamps struct { + *mock.Call +} + +func (_m ExecutableArrayNodeStatus_GetSubNodeDeltaTimestamps) Return(_a0 bitarray.CompactArray) *ExecutableArrayNodeStatus_GetSubNodeDeltaTimestamps { + return &ExecutableArrayNodeStatus_GetSubNodeDeltaTimestamps{Call: _m.Call.Return(_a0)} +} + +func (_m *ExecutableArrayNodeStatus) OnGetSubNodeDeltaTimestamps() 
*ExecutableArrayNodeStatus_GetSubNodeDeltaTimestamps { + c_call := _m.On("GetSubNodeDeltaTimestamps") + return &ExecutableArrayNodeStatus_GetSubNodeDeltaTimestamps{Call: c_call} +} + +func (_m *ExecutableArrayNodeStatus) OnGetSubNodeDeltaTimestampsMatch(matchers ...interface{}) *ExecutableArrayNodeStatus_GetSubNodeDeltaTimestamps { + c_call := _m.On("GetSubNodeDeltaTimestamps", matchers...) + return &ExecutableArrayNodeStatus_GetSubNodeDeltaTimestamps{Call: c_call} +} + +// GetSubNodeDeltaTimestamps provides a mock function with given fields: +func (_m *ExecutableArrayNodeStatus) GetSubNodeDeltaTimestamps() bitarray.CompactArray { + ret := _m.Called() + + var r0 bitarray.CompactArray + if rf, ok := ret.Get(0).(func() bitarray.CompactArray); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bitarray.CompactArray) + } + + return r0 +} + type ExecutableArrayNodeStatus_GetSubNodePhases struct { *mock.Call } diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/MutableArrayNodeStatus.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/MutableArrayNodeStatus.go index c20f80e349..1e081e20ba 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/MutableArrayNodeStatus.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/MutableArrayNodeStatus.go @@ -82,6 +82,38 @@ func (_m *MutableArrayNodeStatus) GetExecutionError() *core.ExecutionError { return r0 } +type MutableArrayNodeStatus_GetSubNodeDeltaTimestamps struct { + *mock.Call +} + +func (_m MutableArrayNodeStatus_GetSubNodeDeltaTimestamps) Return(_a0 bitarray.CompactArray) *MutableArrayNodeStatus_GetSubNodeDeltaTimestamps { + return &MutableArrayNodeStatus_GetSubNodeDeltaTimestamps{Call: _m.Call.Return(_a0)} +} + +func (_m *MutableArrayNodeStatus) OnGetSubNodeDeltaTimestamps() *MutableArrayNodeStatus_GetSubNodeDeltaTimestamps { + c_call := _m.On("GetSubNodeDeltaTimestamps") + return &MutableArrayNodeStatus_GetSubNodeDeltaTimestamps{Call: c_call} +} + +func (_m *MutableArrayNodeStatus) OnGetSubNodeDeltaTimestampsMatch(matchers ...interface{}) *MutableArrayNodeStatus_GetSubNodeDeltaTimestamps { + c_call := _m.On("GetSubNodeDeltaTimestamps", matchers...) 
+ return &MutableArrayNodeStatus_GetSubNodeDeltaTimestamps{Call: c_call} +} + +// GetSubNodeDeltaTimestamps provides a mock function with given fields: +func (_m *MutableArrayNodeStatus) GetSubNodeDeltaTimestamps() bitarray.CompactArray { + ret := _m.Called() + + var r0 bitarray.CompactArray + if rf, ok := ret.Get(0).(func() bitarray.CompactArray); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bitarray.CompactArray) + } + + return r0 +} + type MutableArrayNodeStatus_GetSubNodePhases struct { *mock.Call } @@ -284,6 +316,11 @@ func (_m *MutableArrayNodeStatus) SetExecutionError(executionError *core.Executi _m.Called(executionError) } +// SetSubNodeDeltaTimestamps provides a mock function with given fields: subNodeDeltaTimestamps +func (_m *MutableArrayNodeStatus) SetSubNodeDeltaTimestamps(subNodeDeltaTimestamps bitarray.CompactArray) { + _m.Called(subNodeDeltaTimestamps) +} + // SetSubNodePhases provides a mock function with given fields: subNodePhases func (_m *MutableArrayNodeStatus) SetSubNodePhases(subNodePhases bitarray.CompactArray) { _m.Called(subNodePhases) diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go index 218b045588..c27a8560fc 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go @@ -230,13 +230,14 @@ const ( type ArrayNodeStatus struct { MutableStruct - Phase ArrayNodePhase `json:"phase,omitempty"` - ExecutionError *core.ExecutionError `json:"executionError,omitempty"` - SubNodePhases bitarray.CompactArray `json:"subphase,omitempty"` - SubNodeTaskPhases bitarray.CompactArray `json:"subtphase,omitempty"` - SubNodeRetryAttempts bitarray.CompactArray `json:"subattempts,omitempty"` - SubNodeSystemFailures bitarray.CompactArray `json:"subsysfailures,omitempty"` - TaskPhaseVersion uint32 `json:"taskPhaseVersion,omitempty"` + Phase ArrayNodePhase `json:"phase,omitempty"` + ExecutionError *core.ExecutionError `json:"executionError,omitempty"` + SubNodePhases bitarray.CompactArray `json:"subphase,omitempty"` + SubNodeTaskPhases bitarray.CompactArray `json:"subtphase,omitempty"` + SubNodeRetryAttempts bitarray.CompactArray `json:"subattempts,omitempty"` + SubNodeSystemFailures bitarray.CompactArray `json:"subsysfailures,omitempty"` + SubNodeDeltaTimestamps bitarray.CompactArray `json:"subtimestamps,omitempty"` + TaskPhaseVersion uint32 `json:"taskPhaseVersion,omitempty"` } func (in *ArrayNodeStatus) GetArrayNodePhase() ArrayNodePhase { @@ -305,6 +306,17 @@ func (in *ArrayNodeStatus) SetSubNodeSystemFailures(subNodeSystemFailures bitarr } } +func (in *ArrayNodeStatus) GetSubNodeDeltaTimestamps() bitarray.CompactArray { + return in.SubNodeDeltaTimestamps +} + +func (in *ArrayNodeStatus) SetSubNodeDeltaTimestamps(subNodeDeltaTimestamps bitarray.CompactArray) { + if in.SubNodeDeltaTimestamps != subNodeDeltaTimestamps { + in.SetDirty() + in.SubNodeDeltaTimestamps = subNodeDeltaTimestamps + } +} + func (in *ArrayNodeStatus) GetTaskPhaseVersion() uint32 { return in.TaskPhaseVersion } diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/register.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/register.go index 065b8a8852..8e0d96ada3 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/register.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/register.go @@ -13,7 +13,7 @@ const FlyteWorkflowKind = "flyteworkflow" // SchemeGroupVersion is group version used to register these objects var 
SchemeGroupVersion = schema.GroupVersion{Group: flyteworkflow.GroupName, Version: "v1alpha1"} -// GetKind takes an unqualified kind and returns back a Group qualified GroupKind +// Kind takes an unqualified kind and returns back a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/register_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/register_test.go new file mode 100644 index 0000000000..f55a596ad8 --- /dev/null +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/register_test.go @@ -0,0 +1,28 @@ +package v1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestKind(t *testing.T) { + kind := "test kind" + got := Kind(kind) + want := SchemeGroupVersion.WithKind(kind).GroupKind() + assert.Equal(t, got, want) +} + +func TestResource(t *testing.T) { + resource := "test resource" + got := Resource(resource) + want := SchemeGroupVersion.WithResource(resource).GroupResource() + assert.Equal(t, got, want) +} + +func Test_addKnownTypes(t *testing.T) { + scheme := runtime.NewScheme() + err := addKnownTypes(scheme) + assert.Nil(t, err) +} diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/subworkflow_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/subworkflow_test.go new file mode 100644 index 0000000000..c0534ef9ea --- /dev/null +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/subworkflow_test.go @@ -0,0 +1,36 @@ +package v1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func TestWorkflowNodeSpec_GetLaunchPlanRefID(t *testing.T) { + wfNodeSpec := &WorkflowNodeSpec{ + LaunchPlanRefID: &LaunchPlanRefID{ + &core.Identifier{ + Project: "TestProject", + }, + }, + } + + nilWfNodeSpec := &WorkflowNodeSpec{} + + assert.Equal(t, wfNodeSpec.GetLaunchPlanRefID(), wfNodeSpec.LaunchPlanRefID) + assert.Empty(t, nilWfNodeSpec.GetLaunchPlanRefID()) +} + +func TestWorkflowNodeSpec_GetSubWorkflowRef(t *testing.T) { + workflowID := "TestWorkflowID" + wfNodeSpec := &WorkflowNodeSpec{ + SubWorkflowReference: &workflowID, + } + + nilWfNodeSpec := &WorkflowNodeSpec{} + + assert.Equal(t, wfNodeSpec.GetSubWorkflowRef(), wfNodeSpec.SubWorkflowReference) + assert.Empty(t, nilWfNodeSpec.GetSubWorkflowRef()) +} diff --git a/flytepropeller/pkg/compiler/admin.go b/flytepropeller/pkg/compiler/admin.go index cceab67f67..94646c95d2 100644 --- a/flytepropeller/pkg/compiler/admin.go +++ b/flytepropeller/pkg/compiler/admin.go @@ -29,8 +29,8 @@ func (p *LaunchPlanInterfaceProvider) GetExpectedOutputs() *core.VariableMap { func NewLaunchPlanInterfaceProvider(launchPlan *admin.LaunchPlan) *LaunchPlanInterfaceProvider { return &LaunchPlanInterfaceProvider{ - expectedInputs: launchPlan.Closure.ExpectedInputs, - expectedOutputs: launchPlan.Closure.ExpectedOutputs, - identifier: launchPlan.Id, + expectedInputs: launchPlan.GetClosure().GetExpectedInputs(), + expectedOutputs: launchPlan.GetClosure().GetExpectedOutputs(), + identifier: launchPlan.GetId(), } } diff --git a/flytepropeller/pkg/compiler/admin_test.go b/flytepropeller/pkg/compiler/admin_test.go index 8e5447bfb1..a4a0891d51 100644 --- a/flytepropeller/pkg/compiler/admin_test.go +++ b/flytepropeller/pkg/compiler/admin_test.go @@ -59,15 +59,15 @@ func TestGetId(t *testing.T) { func TestGetExpectedInputs(t *testing.T) { launchPlan := getDummyLaunchPlan() provider 
:= NewLaunchPlanInterfaceProvider(launchPlan) - assert.Contains(t, (*provider.GetExpectedInputs()).Parameters, "foo") - assert.NotNil(t, (*provider.GetExpectedInputs()).Parameters["foo"].Var.Type.GetSimple()) - assert.EqualValues(t, "STRING", (*provider.GetExpectedInputs()).Parameters["foo"].Var.Type.GetSimple().String()) - assert.NotNil(t, (*provider.GetExpectedInputs()).Parameters["foo"].GetDefault()) + assert.Contains(t, (*provider.GetExpectedInputs()).GetParameters(), "foo") + assert.NotNil(t, (*provider.GetExpectedInputs()).GetParameters()["foo"].GetVar().GetType().GetSimple()) + assert.EqualValues(t, "STRING", (*provider.GetExpectedInputs()).GetParameters()["foo"].GetVar().GetType().GetSimple().String()) + assert.NotNil(t, (*provider.GetExpectedInputs()).GetParameters()["foo"].GetDefault()) } func TestGetExpectedOutputs(t *testing.T) { launchPlan := getDummyLaunchPlan() provider := NewLaunchPlanInterfaceProvider(launchPlan) - assert.EqualValues(t, outputs.Variables["foo"].GetType().GetType(), - provider.GetExpectedOutputs().Variables["foo"].GetType().GetType()) + assert.EqualValues(t, outputs.GetVariables()["foo"].GetType().GetType(), + provider.GetExpectedOutputs().GetVariables()["foo"].GetType().GetType()) } diff --git a/flytepropeller/pkg/compiler/builders.go b/flytepropeller/pkg/compiler/builders.go index 908a4b91cb..6e875abc40 100644 --- a/flytepropeller/pkg/compiler/builders.go +++ b/flytepropeller/pkg/compiler/builders.go @@ -32,8 +32,8 @@ type workflowBuilder struct { } func (w workflowBuilder) GetFailureNode() c.Node { - if w.GetCoreWorkflow() != nil && w.GetCoreWorkflow().GetTemplate() != nil && w.GetCoreWorkflow().GetTemplate().FailureNode != nil { - return w.GetOrCreateNodeBuilder(w.GetCoreWorkflow().GetTemplate().FailureNode) + if w.GetCoreWorkflow() != nil && w.GetCoreWorkflow().GetTemplate() != nil && w.GetCoreWorkflow().GetTemplate().GetFailureNode() != nil { + return w.GetOrCreateNodeBuilder(w.GetCoreWorkflow().GetTemplate().GetFailureNode()) } return nil @@ -152,8 +152,8 @@ func (t taskBuilder) GetCoreTask() *core.TaskTemplate { } func (t taskBuilder) GetID() c.Identifier { - if t.flyteTask.Id != nil { - return t.flyteTask.Id + if t.flyteTask.GetId() != nil { + return t.flyteTask.GetId() } return &core.Identifier{} diff --git a/flytepropeller/pkg/compiler/common/id_set.go b/flytepropeller/pkg/compiler/common/id_set.go index eb118fae64..a5cf98dd8d 100644 --- a/flytepropeller/pkg/compiler/common/id_set.go +++ b/flytepropeller/pkg/compiler/common/id_set.go @@ -62,24 +62,24 @@ type sortableSliceOfString []Identifier func (s sortableSliceOfString) Len() int { return len(s) } func (s sortableSliceOfString) Less(i, j int) bool { first, second := s[i], s[j] - if first.ResourceType != second.ResourceType { - return first.ResourceType < second.ResourceType + if first.GetResourceType() != second.GetResourceType() { + return first.GetResourceType() < second.GetResourceType() } - if first.Project != second.Project { - return first.Project < second.Project + if first.GetProject() != second.GetProject() { + return first.GetProject() < second.GetProject() } - if first.Domain != second.Domain { - return first.Domain < second.Domain + if first.GetDomain() != second.GetDomain() { + return first.GetDomain() < second.GetDomain() } - if first.Name != second.Name { - return first.Name < second.Name + if first.GetName() != second.GetName() { + return first.GetName() < second.GetName() } - if first.Version != second.Version { - return first.Version < second.Version + if first.GetVersion() 
!= second.GetVersion() { + return first.GetVersion() < second.GetVersion() } return false diff --git a/flytepropeller/pkg/compiler/common/index.go b/flytepropeller/pkg/compiler/common/index.go index 365a3356c1..d244103e35 100644 --- a/flytepropeller/pkg/compiler/common/index.go +++ b/flytepropeller/pkg/compiler/common/index.go @@ -55,16 +55,16 @@ func NewWorkflowIndex(workflows []*core.CompiledWorkflow, errs errors.CompileErr ok = true index = make(WorkflowIndex, len(workflows)) for _, wf := range workflows { - if wf.Template.Id == nil { + if wf.GetTemplate().GetId() == nil { // TODO: Log/Return error return nil, false } - if _, found := index[wf.Template.Id.String()]; found { - errs.Collect(errors.NewDuplicateIDFoundErr(wf.Template.Id.String())) + if _, found := index[wf.GetTemplate().GetId().String()]; found { + errs.Collect(errors.NewDuplicateIDFoundErr(wf.GetTemplate().GetId().String())) ok = false } else { - index[wf.Template.Id.String()] = wf + index[wf.GetTemplate().GetId().String()] = wf } } diff --git a/flytepropeller/pkg/compiler/requirements.go b/flytepropeller/pkg/compiler/requirements.go index b3b01823a6..69265b64a1 100644 --- a/flytepropeller/pkg/compiler/requirements.go +++ b/flytepropeller/pkg/compiler/requirements.go @@ -57,11 +57,11 @@ func getRequirements(fg *core.WorkflowTemplate, subWfs common.WorkflowIndex, fol func updateWorkflowRequirements(workflow *core.WorkflowTemplate, subWfs common.WorkflowIndex, taskIds, workflowIds common.IdentifierSet, followSubworkflows bool, errs errors.CompileErrors) { - for _, node := range workflow.Nodes { + for _, node := range workflow.GetNodes() { updateNodeRequirements(node, subWfs, taskIds, workflowIds, followSubworkflows, errs) } - if workflow.FailureNode != nil { - updateNodeRequirements(workflow.FailureNode, subWfs, taskIds, workflowIds, followSubworkflows, errs) + if workflow.GetFailureNode() != nil { + updateNodeRequirements(workflow.GetFailureNode(), subWfs, taskIds, workflowIds, followSubworkflows, errs) } } @@ -75,21 +75,21 @@ func updateNodeRequirements(node *flyteNode, subWfs common.WorkflowIndex, taskId workflowIds.Insert(workflowNode.GetLaunchplanRef()) } else if workflowNode.GetSubWorkflowRef() != nil && followSubworkflows { if subWf, found := subWfs[workflowNode.GetSubWorkflowRef().String()]; !found { - errs.Collect(errors.NewWorkflowReferenceNotFoundErr(node.Id, workflowNode.GetSubWorkflowRef().String())) + errs.Collect(errors.NewWorkflowReferenceNotFoundErr(node.GetId(), workflowNode.GetSubWorkflowRef().String())) } else { - updateWorkflowRequirements(subWf.Template, subWfs, taskIds, workflowIds, followSubworkflows, errs) + updateWorkflowRequirements(subWf.GetTemplate(), subWfs, taskIds, workflowIds, followSubworkflows, errs) } } } else if branchN := node.GetBranchNode(); branchN != nil { - updateNodeRequirements(branchN.IfElse.Case.ThenNode, subWfs, taskIds, workflowIds, followSubworkflows, errs) - for _, otherCase := range branchN.IfElse.Other { - updateNodeRequirements(otherCase.ThenNode, subWfs, taskIds, workflowIds, followSubworkflows, errs) + updateNodeRequirements(branchN.GetIfElse().GetCase().GetThenNode(), subWfs, taskIds, workflowIds, followSubworkflows, errs) + for _, otherCase := range branchN.GetIfElse().GetOther() { + updateNodeRequirements(otherCase.GetThenNode(), subWfs, taskIds, workflowIds, followSubworkflows, errs) } - if elseNode := branchN.IfElse.GetElseNode(); elseNode != nil { + if elseNode := branchN.GetIfElse().GetElseNode(); elseNode != nil { updateNodeRequirements(elseNode, subWfs, 
taskIds, workflowIds, followSubworkflows, errs) } } else if arrayNode := node.GetArrayNode(); arrayNode != nil { - updateNodeRequirements(arrayNode.Node, subWfs, taskIds, workflowIds, followSubworkflows, errs) + updateNodeRequirements(arrayNode.GetNode(), subWfs, taskIds, workflowIds, followSubworkflows, errs) } } diff --git a/flytepropeller/pkg/compiler/task_compiler.go b/flytepropeller/pkg/compiler/task_compiler.go index 4d8fea46db..ea6e4efef2 100644 --- a/flytepropeller/pkg/compiler/task_compiler.go +++ b/flytepropeller/pkg/compiler/task_compiler.go @@ -23,25 +23,25 @@ func validateResource(resourceName core.Resources_ResourceName, resourceVal stri func validateKnownResources(resources []*core.Resources_ResourceEntry, errs errors.CompileErrors) { for _, r := range resources { - validateResource(r.Name, r.Value, errs.NewScope()) + validateResource(r.GetName(), r.GetValue(), errs.NewScope()) } } func validateResources(resources *core.Resources, errs errors.CompileErrors) (ok bool) { // Validate known resource keys. - validateKnownResources(resources.Requests, errs.NewScope()) - validateKnownResources(resources.Limits, errs.NewScope()) + validateKnownResources(resources.GetRequests(), errs.NewScope()) + validateKnownResources(resources.GetLimits(), errs.NewScope()) return !errs.HasErrors() } func validateContainerCommand(task *core.TaskTemplate, errs errors.CompileErrors) (ok bool) { - if task.Interface == nil { + if task.GetInterface() == nil { // Nothing to validate. return } - hasInputs := task.Interface.Inputs != nil && len(task.Interface.GetInputs().Variables) > 0 - hasOutputs := task.Interface.Outputs != nil && len(task.Interface.GetOutputs().Variables) > 0 + hasInputs := task.GetInterface().GetInputs() != nil && len(task.GetInterface().GetInputs().GetVariables()) > 0 + hasOutputs := task.GetInterface().GetOutputs() != nil && len(task.GetInterface().GetOutputs().GetVariables()) > 0 if !(hasInputs || hasOutputs) { // Nothing to validate. 
return @@ -63,12 +63,12 @@ func validateContainer(task *core.TaskTemplate, errs errors.CompileErrors) (ok b validateContainerCommand(task, errs) container := task.GetContainer() - if container.Image == "" { + if container.GetImage() == "" { errs.Collect(errors.NewValueRequiredErr("container", "image")) } - if container.Resources != nil { - validateResources(container.Resources, errs.NewScope()) + if container.GetResources() != nil { + validateResources(container.GetResources(), errs.NewScope()) } return !errs.HasErrors() @@ -80,7 +80,7 @@ func validateK8sPod(task *core.TaskTemplate, errs errors.CompileErrors) (ok bool return } var podSpec v1.PodSpec - if err := utils.UnmarshalStructToObj(task.GetK8SPod().PodSpec, &podSpec); err != nil { + if err := utils.UnmarshalStructToObj(task.GetK8SPod().GetPodSpec(), &podSpec); err != nil { errs.Collect(errors.NewInvalidValueErr("root", "k8s pod spec")) return } @@ -93,7 +93,7 @@ func validateK8sPod(task *core.TaskTemplate, errs errors.CompileErrors) (ok bool } func compileTaskInternal(task *core.TaskTemplate, errs errors.CompileErrors) common.Task { - if task.Id == nil { + if task.GetId() == nil { errs.Collect(errors.NewValueRequiredErr("root", "Id")) } diff --git a/flytepropeller/pkg/compiler/test/compiler_test.go b/flytepropeller/pkg/compiler/test/compiler_test.go index 355fc4a15b..a6925dc3de 100644 --- a/flytepropeller/pkg/compiler/test/compiler_test.go +++ b/flytepropeller/pkg/compiler/test/compiler_test.go @@ -3,7 +3,6 @@ package test import ( "encoding/json" "flag" - "io/ioutil" "os" "path/filepath" "reflect" @@ -36,27 +35,27 @@ func makeDefaultInputs(iface *core.TypedInterface) *core.LiteralMap { return nil } - res := make(map[string]*core.Literal, len(iface.GetInputs().Variables)) - for inputName, inputVar := range iface.GetInputs().Variables { + res := make(map[string]*core.Literal, len(iface.GetInputs().GetVariables())) + for inputName, inputVar := range iface.GetInputs().GetVariables() { // A workaround because the coreutils don't support the "StructuredDataSet" type - if reflect.TypeOf(inputVar.Type.Type) == reflect.TypeOf(&core.LiteralType_StructuredDatasetType{}) { + if reflect.TypeOf(inputVar.GetType().GetType()) == reflect.TypeOf(&core.LiteralType_StructuredDatasetType{}) { res[inputName] = &core.Literal{ Value: &core.Literal_Scalar{ Scalar: &core.Scalar{ Value: &core.Scalar_StructuredDataset{ StructuredDataset: &core.StructuredDataset{ Metadata: &core.StructuredDatasetMetadata{ - StructuredDatasetType: inputVar.Type.Type.(*core.LiteralType_StructuredDatasetType).StructuredDatasetType, + StructuredDatasetType: inputVar.GetType().GetType().(*core.LiteralType_StructuredDatasetType).StructuredDatasetType, }, }, }, }, }, } - } else if reflect.TypeOf(inputVar.Type.Type) == reflect.TypeOf(&core.LiteralType_Simple{}) && inputVar.Type.GetSimple() == core.SimpleType_DATETIME { + } else if reflect.TypeOf(inputVar.GetType().GetType()) == reflect.TypeOf(&core.LiteralType_Simple{}) && inputVar.GetType().GetSimple() == core.SimpleType_DATETIME { res[inputName] = coreutils.MustMakeLiteral(time.UnixMicro(10)) } else { - res[inputName] = coreutils.MustMakeDefaultLiteralForType(inputVar.Type) + res[inputName] = coreutils.MustMakeDefaultLiteralForType(inputVar.GetType()) } } @@ -114,7 +113,7 @@ func TestDynamic(t *testing.T) { // t.SkipNow() //} - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) assert.NoError(t, err) wf := &core.DynamicJobSpec{} err = utils.UnmarshalBytesToPb(raw, wf) @@ -123,7 +122,7 @@ func TestDynamic(t *testing.T) 
{ } t.Log("Compiling Workflow") - compiledTasks := mustCompileTasks(t, wf.Tasks) + compiledTasks := mustCompileTasks(t, wf.GetTasks()) wfTemplate := &core.WorkflowTemplate{ Id: &core.Identifier{ Domain: "domain", @@ -146,16 +145,16 @@ func TestDynamic(t *testing.T) { }, }}, }, - Nodes: wf.Nodes, - Outputs: wf.Outputs, + Nodes: wf.GetNodes(), + Outputs: wf.GetOutputs(), } - compiledWfc, err := compiler.CompileWorkflow(wfTemplate, wf.Subworkflows, compiledTasks, + compiledWfc, err := compiler.CompileWorkflow(wfTemplate, wf.GetSubworkflows(), compiledTasks, []common.InterfaceProvider{}) if !assert.NoError(t, err) { t.FailNow() } - inputs := makeDefaultInputs(compiledWfc.Primary.Template.Interface) + inputs := makeDefaultInputs(compiledWfc.GetPrimary().GetTemplate().GetInterface()) flyteWf, err := k8s.BuildFlyteWorkflow(compiledWfc, inputs, @@ -180,22 +179,22 @@ func TestDynamic(t *testing.T) { func getAllSubNodeIDs(n *core.Node) sets.String { res := sets.NewString() if branchNode := n.GetBranchNode(); branchNode != nil { - thenNode := branchNode.IfElse.Case.ThenNode + thenNode := branchNode.GetIfElse().GetCase().GetThenNode() if hasPromiseInputs(thenNode.GetInputs()) { res.Insert(thenNode.GetId()) } res = res.Union(getAllSubNodeIDs(thenNode)) - for _, other := range branchNode.IfElse.Other { - if hasPromiseInputs(other.ThenNode.GetInputs()) { - res.Insert(other.ThenNode.GetId()) + for _, other := range branchNode.GetIfElse().GetOther() { + if hasPromiseInputs(other.GetThenNode().GetInputs()) { + res.Insert(other.GetThenNode().GetId()) } - res = res.Union(getAllSubNodeIDs(other.ThenNode)) + res = res.Union(getAllSubNodeIDs(other.GetThenNode())) } - if elseNode := branchNode.IfElse.GetElseNode(); elseNode != nil { + if elseNode := branchNode.GetIfElse().GetElseNode(); elseNode != nil { if hasPromiseInputs(elseNode.GetInputs()) { res.Insert(elseNode.GetId()) } @@ -221,7 +220,7 @@ var allNodesPredicate = func(n *core.Node) bool { func getAllMatchingNodes(wf *core.CompiledWorkflow, predicate nodePredicate) sets.String { s := sets.NewString() - for _, n := range wf.Template.Nodes { + for _, n := range wf.GetTemplate().GetNodes() { if predicate(n) { s.Insert(n.GetId()) } @@ -235,13 +234,13 @@ func getAllMatchingNodes(wf *core.CompiledWorkflow, predicate nodePredicate) set func bindingHasPromiseInputs(binding *core.BindingData) bool { switch v := binding.GetValue().(type) { case *core.BindingData_Collection: - for _, d := range v.Collection.Bindings { + for _, d := range v.Collection.GetBindings() { if bindingHasPromiseInputs(d) { return true } } case *core.BindingData_Map: - for _, d := range v.Map.Bindings { + for _, d := range v.Map.GetBindings() { if bindingHasPromiseInputs(d) { return true } @@ -255,7 +254,7 @@ func bindingHasPromiseInputs(binding *core.BindingData) bool { func hasPromiseInputs(bindings []*core.Binding) bool { for _, b := range bindings { - if bindingHasPromiseInputs(b.Binding) { + if bindingHasPromiseInputs(b.GetBinding()) { return true } } @@ -265,14 +264,14 @@ func hasPromiseInputs(bindings []*core.Binding) bool { func assertNodeIDsInConnections(t testing.TB, nodeIDsWithDeps, allNodeIDs sets.String, connections *core.ConnectionSet) bool { actualNodeIDs := sets.NewString() - for id, lst := range connections.Downstream { + for id, lst := range connections.GetDownstream() { actualNodeIDs.Insert(id) - actualNodeIDs.Insert(lst.Ids...) + actualNodeIDs.Insert(lst.GetIds()...) 
} - for id, lst := range connections.Upstream { + for id, lst := range connections.GetUpstream() { actualNodeIDs.Insert(id) - actualNodeIDs.Insert(lst.Ids...) + actualNodeIDs.Insert(lst.GetIds()...) } notFoundInConnections := nodeIDsWithDeps.Difference(actualNodeIDs) @@ -305,13 +304,13 @@ func storeOrDiff(t testing.TB, f func(obj any) ([]byte, error), obj any, path st } if *update { - err = ioutil.WriteFile(path, raw, os.ModePerm) + err = os.WriteFile(path, raw, os.ModePerm) // #nosec G306 if !assert.NoError(t, err) { return false } } else { - goldenRaw, err := ioutil.ReadFile(path) + goldenRaw, err := os.ReadFile(path) if !assert.NoError(t, err) { return false } @@ -339,7 +338,7 @@ func runCompileTest(t *testing.T, dirName string) { } for _, p := range paths { - raw, err := ioutil.ReadFile(p) + raw, err := os.ReadFile(p) assert.NoError(t, err) tsk := &admin.TaskSpec{} err = proto.Unmarshal(raw, tsk) @@ -349,13 +348,13 @@ func runCompileTest(t *testing.T, dirName string) { } t.Run(p, func(t *testing.T) { - inputTask := tsk.Template + inputTask := tsk.GetTemplate() setDefaultFields(inputTask) task, err := compiler.CompileTask(inputTask) if !assert.NoError(t, err) { t.FailNow() } - compiledTasks[tsk.Template.Id.String()] = task + compiledTasks[tsk.GetTemplate().GetId().String()] = task // unmarshal from json file to compare rather than yaml taskFile := filepath.Join(filepath.Dir(p), "compiled", strings.TrimRight(filepath.Base(p), filepath.Ext(p))+"_task.json") @@ -387,7 +386,7 @@ func runCompileTest(t *testing.T, dirName string) { } t.Run(p, func(t *testing.T) { - inputWf := wf.Workflow + inputWf := wf.GetWorkflow() reqs, err := compiler.GetRequirements(inputWf, nil) if !assert.NoError(t, err) { @@ -411,9 +410,9 @@ func runCompileTest(t *testing.T, dirName string) { t.FailNow() } - allNodeIDs := getAllMatchingNodes(compiledWfc.Primary, allNodesPredicate) - nodeIDsWithDeps := getAllMatchingNodes(compiledWfc.Primary, hasPromiseNodePredicate) - if !assertNodeIDsInConnections(t, nodeIDsWithDeps, allNodeIDs, compiledWfc.Primary.Connections) { + allNodeIDs := getAllMatchingNodes(compiledWfc.GetPrimary(), allNodesPredicate) + nodeIDsWithDeps := getAllMatchingNodes(compiledWfc.GetPrimary(), hasPromiseNodePredicate) + if !assertNodeIDsInConnections(t, nodeIDsWithDeps, allNodeIDs, compiledWfc.GetPrimary().GetConnections()) { t.FailNow() } @@ -433,7 +432,7 @@ func runCompileTest(t *testing.T, dirName string) { for _, p := range paths { t.Run(p, func(t *testing.T) { - raw, err := ioutil.ReadFile(p) + raw, err := os.ReadFile(p) if !assert.NoError(t, err) { t.FailNow() } @@ -443,9 +442,9 @@ func runCompileTest(t *testing.T, dirName string) { t.FailNow() } - inputs := makeDefaultInputs(compiledWfc.Primary.Template.Interface) + inputs := makeDefaultInputs(compiledWfc.GetPrimary().GetTemplate().GetInterface()) - dotFormat := visualize.ToGraphViz(compiledWfc.Primary) + dotFormat := visualize.ToGraphViz(compiledWfc.GetPrimary()) t.Logf("GraphViz Dot: %v\n", dotFormat) flyteWf, err := k8s.BuildFlyteWorkflow(compiledWfc, diff --git a/flytepropeller/pkg/compiler/transformers/k8s/inputs.go b/flytepropeller/pkg/compiler/transformers/k8s/inputs.go index 2b94570c20..6d7572e9f5 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/inputs.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/inputs.go @@ -15,20 +15,20 @@ func validateInputs(nodeID common.NodeID, iface *core.TypedInterface, inputs cor return false } - if iface.Inputs == nil { + if iface.GetInputs() == nil { 
errs.Collect(errors.NewValueRequiredErr(nodeID, "interface.InputsRef")) return false } - varMap := make(map[string]*core.Variable, len(iface.Inputs.Variables)) + varMap := make(map[string]*core.Variable, len(iface.GetInputs().GetVariables())) requiredInputsSet := sets.String{} - for name, v := range iface.Inputs.Variables { + for name, v := range iface.GetInputs().GetVariables() { varMap[name] = v requiredInputsSet.Insert(name) } boundInputsSet := sets.String{} - for inputVar, inputVal := range inputs.Literals { + for inputVar, inputVal := range inputs.GetLiterals() { v, exists := varMap[inputVar] if !exists { errs.Collect(errors.NewVariableNameNotFoundErr(nodeID, "", inputVar)) @@ -41,8 +41,8 @@ func validateInputs(nodeID common.NodeID, iface *core.TypedInterface, inputs cor errs.Collect(errors.NewInvalidLiteralTypeErr(nodeID, inputVar, err)) continue } - if !validators.AreTypesCastable(inputType, v.Type) { - errs.Collect(errors.NewMismatchingTypesErr(nodeID, inputVar, common.LiteralTypeToStr(v.Type), common.LiteralTypeToStr(inputType))) + if !validators.AreTypesCastable(inputType, v.GetType()) { + errs.Collect(errors.NewMismatchingTypesErr(nodeID, inputVar, common.LiteralTypeToStr(v.GetType()), common.LiteralTypeToStr(inputType))) continue } diff --git a/flytepropeller/pkg/compiler/transformers/k8s/node.go b/flytepropeller/pkg/compiler/transformers/k8s/node.go index 8a4c9248ec..18ec1ba02f 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/node.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/node.go @@ -35,8 +35,8 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile taskID := n.GetTaskNode().GetReferenceId().String() // TODO: Use task index for quick lookup for _, t := range tasks { - if t.Template.Id.String() == taskID { - task = t.Template + if t.GetTemplate().GetId().String() == taskID { + task = t.GetTemplate() break } } @@ -46,7 +46,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile return nil, !errs.HasErrors() } - if overrides := n.GetTaskNode().Overrides; overrides != nil { + if overrides := n.GetTaskNode().GetOverrides(); overrides != nil { if overrides.GetResources() != nil { resources = overrides.GetResources() } @@ -87,7 +87,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile interruptVal := n.GetMetadata().GetInterruptible() interruptible = &interruptVal } - name = n.GetMetadata().Name + name = n.GetMetadata().GetName() } nodeSpec := &v1alpha1.NodeSpec{ @@ -114,7 +114,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile return nil, !errs.HasErrors() } - switch n.GetWorkflowNode().Reference.(type) { + switch n.GetWorkflowNode().GetReference().(type) { case *core.WorkflowNode_LaunchplanRef: nodeSpec.Kind = v1alpha1.NodeKindWorkflow nodeSpec.WorkflowNode = &v1alpha1.WorkflowNodeSpec{ @@ -146,7 +146,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile case *core.Node_GateNode: nodeSpec.Kind = v1alpha1.NodeKindGate gateNode := n.GetGateNode() - switch gateNode.Condition.(type) { + switch gateNode.GetCondition().(type) { case *core.GateNode_Approve: nodeSpec.GateNode = &v1alpha1.GateNodeSpec{ Kind: v1alpha1.ConditionKindApprove, @@ -173,7 +173,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile arrayNode := n.GetArrayNode() // build subNodeSpecs - subNodeSpecs, ok := buildNodeSpec(arrayNode.Node, tasks, errs) + subNodeSpecs, ok := buildNodeSpec(arrayNode.GetNode(), tasks, 
errs) if !ok { return nil, ok } @@ -191,7 +191,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile Parallelism: parallelism, } - switch successCriteria := arrayNode.SuccessCriteria.(type) { + switch successCriteria := arrayNode.GetSuccessCriteria().(type) { case *core.ArrayNode_MinSuccesses: nodeSpec.ArrayNode.MinSuccesses = &successCriteria.MinSuccesses case *core.ArrayNode_MinSuccessRatio: @@ -209,13 +209,13 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile } func buildIfBlockSpec(block *core.IfBlock, tasks []*core.CompiledTask, errs errors.CompileErrors) (*v1alpha1.IfBlock, []*v1alpha1.NodeSpec) { - nodeSpecs, ok := buildNodeSpec(block.ThenNode, tasks, errs) + nodeSpecs, ok := buildNodeSpec(block.GetThenNode(), tasks, errs) if !ok { return nil, []*v1alpha1.NodeSpec{} } return &v1alpha1.IfBlock{ - Condition: v1alpha1.BooleanExpression{BooleanExpression: block.Condition}, - ThenNode: refStr(block.ThenNode.Id), + Condition: v1alpha1.BooleanExpression{BooleanExpression: block.GetCondition()}, + ThenNode: refStr(block.GetThenNode().GetId()), }, nodeSpecs } @@ -226,26 +226,26 @@ func buildBranchNodeSpec(branch *core.BranchNode, tasks []*core.CompiledTask, er var childNodes []*v1alpha1.NodeSpec - branchNode, nodeSpecs := buildIfBlockSpec(branch.IfElse.Case, tasks, errs.NewScope()) + branchNode, nodeSpecs := buildIfBlockSpec(branch.GetIfElse().GetCase(), tasks, errs.NewScope()) res := &v1alpha1.BranchNodeSpec{ If: *branchNode, } childNodes = append(childNodes, nodeSpecs...) - switch branch.IfElse.GetDefault().(type) { + switch branch.GetIfElse().GetDefault().(type) { case *core.IfElseBlock_ElseNode: - ns, ok := buildNodeSpec(branch.IfElse.GetElseNode(), tasks, errs) + ns, ok := buildNodeSpec(branch.GetIfElse().GetElseNode(), tasks, errs) if !ok { return nil, []*v1alpha1.NodeSpec{} } childNodes = append(childNodes, ns...) - res.Else = refStr(branch.IfElse.GetElseNode().Id) + res.Else = refStr(branch.GetIfElse().GetElseNode().GetId()) case *core.IfElseBlock_Error: - res.ElseFail = branch.IfElse.GetError() + res.ElseFail = branch.GetIfElse().GetError() } - other := make([]*v1alpha1.IfBlock, 0, len(branch.IfElse.Other)) - for _, block := range branch.IfElse.Other { + other := make([]*v1alpha1.IfBlock, 0, len(branch.GetIfElse().GetOther())) + for _, block := range branch.GetIfElse().GetOther() { b, ns := buildIfBlockSpec(block, tasks, errs.NewScope()) other = append(other, b) childNodes = append(childNodes, ns...) 
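Note on the pattern applied in the hunks above (and throughout this diff): direct field access on generated flyteidl messages (for example branch.IfElse.Case.ThenNode) is replaced with the corresponding Get* accessors. The short, self-contained Go sketch below shows why that is safer: protoc-generated getters tolerate a nil receiver, so a chain of getters degrades to nil instead of panicking. The types here are hand-rolled stand-ins that only mimic the shape of generated code; they are not the real flyteidl types.

package main

import "fmt"

// Identifier, Template and CompiledWorkflow are minimal stand-ins for
// protoc-generated messages; only the getter shape matters here.
type Identifier struct{ Name string }

type Template struct{ Id *Identifier }

// GetId mirrors a generated getter: it checks for a nil receiver first.
func (t *Template) GetId() *Identifier {
	if t == nil {
		return nil
	}
	return t.Id
}

type CompiledWorkflow struct{ Template *Template }

func (w *CompiledWorkflow) GetTemplate() *Template {
	if w == nil {
		return nil
	}
	return w.Template
}

func main() {
	var wf *CompiledWorkflow // nil message, e.g. an unset optional field

	// Getter chain: each step short-circuits on nil, so this prints <nil>.
	fmt.Println(wf.GetTemplate().GetId())

	// Direct field access on the same nil chain would panic:
	// fmt.Println(wf.Template.Id) // nil pointer dereference
}

The trade-off is that a silent nil can hide a missing field, which is why the compiler code in this diff still collects explicit errors (errors.NewValueRequiredErr and friends) when a required message turns out to be absent.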
@@ -285,12 +285,12 @@ func buildTasks(tasks []*core.CompiledTask, errs errors.CompileErrors) map[commo if flyteTask == nil { errs.Collect(errors.NewValueRequiredErr("root", "coreTask")) } else { - taskID := flyteTask.Template.Id.String() + taskID := flyteTask.GetTemplate().GetId().String() if _, exists := res[taskID]; exists { errs.Collect(errors.NewValueCollisionError(taskID, "Id", taskID)) } - res[taskID] = &v1alpha1.TaskSpec{TaskTemplate: flyteTask.Template} + res[taskID] = &v1alpha1.TaskSpec{TaskTemplate: flyteTask.GetTemplate()} } } diff --git a/flytepropeller/pkg/compiler/transformers/k8s/node_test.go b/flytepropeller/pkg/compiler/transformers/k8s/node_test.go index c6a08b5991..28fbb2bf55 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/node_test.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/node_test.go @@ -175,7 +175,7 @@ func TestBuildNodeSpec(t *testing.T) { n.Node.Target = &core.Node_WorkflowNode{ WorkflowNode: &core.WorkflowNode{ Reference: &core.WorkflowNode_SubWorkflowRef{ - SubWorkflowRef: n.subWF.GetCoreWorkflow().Template.Id, + SubWorkflowRef: n.subWF.GetCoreWorkflow().GetTemplate().GetId(), }, }, } @@ -394,15 +394,15 @@ func TestBuildTasks(t *testing.T) { taskMap := buildTasks(tasks, errs) annInputTask := taskMap[(&core.Identifier{Name: "annotatedInput"}).String()] - assert.Nil(t, annInputTask.Interface.Inputs.Variables["a"].Type.Annotation) + assert.Nil(t, annInputTask.Interface.GetInputs().GetVariables()["a"].GetType().GetAnnotation()) unAnnInputTask := taskMap[(&core.Identifier{Name: "unannotatedInput"}).String()] - assert.Nil(t, unAnnInputTask.Interface.Inputs.Variables["a"].Type.Annotation) + assert.Nil(t, unAnnInputTask.Interface.GetInputs().GetVariables()["a"].GetType().GetAnnotation()) annOutputTask := taskMap[(&core.Identifier{Name: "annotatedOutput"}).String()] - assert.Nil(t, annOutputTask.Interface.Outputs.Variables["a"].Type.Annotation) + assert.Nil(t, annOutputTask.Interface.GetOutputs().GetVariables()["a"].GetType().GetAnnotation()) unAnnOutputTask := taskMap[(&core.Identifier{Name: "unannotatedOutput"}).String()] - assert.Nil(t, unAnnOutputTask.Interface.Outputs.Variables["a"].Type.Annotation) + assert.Nil(t, unAnnOutputTask.Interface.GetOutputs().GetVariables()["a"].GetType().GetAnnotation()) }) } diff --git a/flytepropeller/pkg/compiler/transformers/k8s/utils.go b/flytepropeller/pkg/compiler/transformers/k8s/utils.go index 06884f4b75..bd08be3a9a 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/utils.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/utils.go @@ -19,15 +19,15 @@ func refStr(s string) *string { } func computeRetryStrategy(n *core.Node, t *core.TaskTemplate) *v1alpha1.RetryStrategy { - if n.GetMetadata() != nil && n.GetMetadata().GetRetries() != nil && n.GetMetadata().GetRetries().Retries != 0 { + if n.GetMetadata() != nil && n.GetMetadata().GetRetries() != nil && n.GetMetadata().GetRetries().GetRetries() != 0 { return &v1alpha1.RetryStrategy{ - MinAttempts: refInt(int(n.GetMetadata().GetRetries().Retries + 1)), + MinAttempts: refInt(int(n.GetMetadata().GetRetries().GetRetries() + 1)), } } - if t != nil && t.GetMetadata() != nil && t.GetMetadata().GetRetries() != nil && t.GetMetadata().GetRetries().Retries != 0 { + if t != nil && t.GetMetadata() != nil && t.GetMetadata().GetRetries() != nil && t.GetMetadata().GetRetries().GetRetries() != 0 { return &v1alpha1.RetryStrategy{ - MinAttempts: refInt(int(t.GetMetadata().GetRetries().Retries + 1)), + MinAttempts: refInt(int(t.GetMetadata().GetRetries().GetRetries() + 
1)), } } @@ -91,10 +91,10 @@ func StripTypeMetadata(t *core.LiteralType) *core.LiteralType { // interface of the dynamically compiled workflow. `Structure` is used to extend type checking information on // different Flyte types and is therefore required to ensure correct type validation. - switch underlyingType := c.Type.(type) { + switch underlyingType := c.GetType().(type) { case *core.LiteralType_UnionType: - variants := make([]*core.LiteralType, 0, len(c.GetUnionType().Variants)) - for _, variant := range c.GetUnionType().Variants { + variants := make([]*core.LiteralType, 0, len(c.GetUnionType().GetVariants())) + for _, variant := range c.GetUnionType().GetVariants() { variants = append(variants, StripTypeMetadata(variant)) } @@ -104,11 +104,11 @@ func StripTypeMetadata(t *core.LiteralType) *core.LiteralType { case *core.LiteralType_CollectionType: underlyingType.CollectionType = StripTypeMetadata(c.GetCollectionType()) case *core.LiteralType_StructuredDatasetType: - columns := make([]*core.StructuredDatasetType_DatasetColumn, 0, len(c.GetStructuredDatasetType().Columns)) - for _, column := range c.GetStructuredDatasetType().Columns { + columns := make([]*core.StructuredDatasetType_DatasetColumn, 0, len(c.GetStructuredDatasetType().GetColumns())) + for _, column := range c.GetStructuredDatasetType().GetColumns() { columns = append(columns, &core.StructuredDatasetType_DatasetColumn{ - Name: column.Name, - LiteralType: StripTypeMetadata(column.LiteralType), + Name: column.GetName(), + LiteralType: StripTypeMetadata(column.GetLiteralType()), }) } @@ -125,17 +125,17 @@ func StripInterfaceTypeMetadata(iface *core.TypedInterface) *core.TypedInterface newIface := *iface - if iface.Inputs != nil { - for name, i := range iface.Inputs.Variables { - i.Type = StripTypeMetadata(i.Type) + if iface.GetInputs() != nil { + for name, i := range iface.GetInputs().GetVariables() { + i.Type = StripTypeMetadata(i.GetType()) i.Description = "" newIface.Inputs.Variables[name] = i } } - if iface.Outputs != nil { - for name, i := range iface.Outputs.Variables { - i.Type = StripTypeMetadata(i.Type) + if iface.GetOutputs() != nil { + for name, i := range iface.GetOutputs().GetVariables() { + i.Type = StripTypeMetadata(i.GetType()) i.Description = "" iface.Outputs.Variables[name] = i } diff --git a/flytepropeller/pkg/compiler/transformers/k8s/utils_test.go b/flytepropeller/pkg/compiler/transformers/k8s/utils_test.go index d2d9b10866..0a7e991399 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/utils_test.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/utils_test.go @@ -14,9 +14,9 @@ func TestComputeRetryStrategy(t *testing.T) { tests := []struct { name string - nodeRetries int - taskRetries int - expectedRetries int + nodeRetries uint32 + taskRetries uint32 + expectedRetries uint32 }{ {"node-only", 1, 0, 2}, {"task-only", 0, 1, 2}, @@ -31,7 +31,7 @@ func TestComputeRetryStrategy(t *testing.T) { node = &core.Node{ Metadata: &core.NodeMetadata{ Retries: &core.RetryStrategy{ - Retries: uint32(test.nodeRetries), + Retries: test.nodeRetries, }, }, } @@ -42,7 +42,7 @@ func TestComputeRetryStrategy(t *testing.T) { tmpl = &core.TaskTemplate{ Metadata: &core.TaskMetadata{ Retries: &core.RetryStrategy{ - Retries: uint32(test.taskRetries), + Retries: test.taskRetries, }, }, } @@ -51,7 +51,7 @@ func TestComputeRetryStrategy(t *testing.T) { r := computeRetryStrategy(node, tmpl) if test.expectedRetries != 0 { assert.NotNil(t, r) - assert.Equal(t, test.expectedRetries, *r.MinAttempts) + assert.Equal(t, 
int(test.expectedRetries), *r.MinAttempts) // #nosec G115 } else { assert.Nil(t, r) } @@ -292,7 +292,7 @@ func TestStripInterfaceTypeMetadata(t *testing.T) { } stripped := StripInterfaceTypeMetadata(i) - assert.Nil(t, stripped.Inputs.Variables["a"].Type.Metadata) - assert.Nil(t, stripped.Outputs.Variables["a"].Type.Metadata) + assert.Nil(t, stripped.GetInputs().GetVariables()["a"].GetType().GetMetadata()) + assert.Nil(t, stripped.GetOutputs().GetVariables()["a"].GetType().GetMetadata()) }) } diff --git a/flytepropeller/pkg/compiler/transformers/k8s/workflow.go b/flytepropeller/pkg/compiler/transformers/k8s/workflow.go index 2421ddf9bb..eb9023bfa2 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/workflow.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/workflow.go @@ -39,13 +39,13 @@ func requiresInputs(w *core.WorkflowTemplate) bool { return false } - return len(w.GetInterface().GetInputs().Variables) > 0 + return len(w.GetInterface().GetInputs().GetVariables()) > 0 } // Note: Update WorkflowNameFromID for any change made to WorkflowIDAsString func WorkflowIDAsString(id *core.Identifier) string { b := strings.Builder{} - _, err := b.WriteString(id.Project) + _, err := b.WriteString(id.GetProject()) if err != nil { return "" } @@ -55,7 +55,7 @@ func WorkflowIDAsString(id *core.Identifier) string { return "" } - _, err = b.WriteString(id.Domain) + _, err = b.WriteString(id.GetDomain()) if err != nil { return "" } @@ -65,7 +65,7 @@ func WorkflowIDAsString(id *core.Identifier) string { return "" } - _, err = b.WriteString(id.Name) + _, err = b.WriteString(id.GetName()) if err != nil { return "" } @@ -83,10 +83,10 @@ func WorkflowNameFromID(id string) string { func buildFlyteWorkflowSpec(wf *core.CompiledWorkflow, tasks []*core.CompiledTask, errs errors.CompileErrors) ( spec *v1alpha1.WorkflowSpec, err error) { - wf.Template.Interface = StripInterfaceTypeMetadata(wf.Template.Interface) + wf.Template.Interface = StripInterfaceTypeMetadata(wf.GetTemplate().GetInterface()) var failureN *v1alpha1.NodeSpec - if n := wf.Template.GetFailureNode(); n != nil { + if n := wf.GetTemplate().GetFailureNode(); n != nil { nodes, ok := buildNodeSpec(n, tasks, errs.NewScope()) if !ok { return nil, errs @@ -94,34 +94,34 @@ func buildFlyteWorkflowSpec(wf *core.CompiledWorkflow, tasks []*core.CompiledTas failureN = nodes[0] } - nodes, _ := buildNodes(wf.Template.GetNodes(), tasks, errs.NewScope()) + nodes, _ := buildNodes(wf.GetTemplate().GetNodes(), tasks, errs.NewScope()) if errs.HasErrors() { return nil, errs } - outputBindings := make([]*v1alpha1.Binding, 0, len(wf.Template.Outputs)) - for _, b := range wf.Template.Outputs { + outputBindings := make([]*v1alpha1.Binding, 0, len(wf.GetTemplate().GetOutputs())) + for _, b := range wf.GetTemplate().GetOutputs() { outputBindings = append(outputBindings, &v1alpha1.Binding{ Binding: b, }) } var outputs *v1alpha1.OutputVarMap - if wf.Template.GetInterface() != nil { - outputs = &v1alpha1.OutputVarMap{VariableMap: wf.Template.GetInterface().Outputs} + if wf.GetTemplate().GetInterface() != nil { + outputs = &v1alpha1.OutputVarMap{VariableMap: wf.GetTemplate().GetInterface().GetOutputs()} } else { outputs = &v1alpha1.OutputVarMap{VariableMap: &core.VariableMap{}} } failurePolicy := v1alpha1.WorkflowOnFailurePolicy(core.WorkflowMetadata_FAIL_IMMEDIATELY) - if wf.Template != nil && wf.Template.Metadata != nil { - failurePolicy = v1alpha1.WorkflowOnFailurePolicy(wf.Template.Metadata.OnFailure) + if wf.GetTemplate() != nil && wf.GetTemplate().GetMetadata() != 
nil { + failurePolicy = v1alpha1.WorkflowOnFailurePolicy(wf.GetTemplate().GetMetadata().GetOnFailure()) } connections := buildConnections(wf) return &v1alpha1.WorkflowSpec{ - ID: WorkflowIDAsString(wf.Template.Id), + ID: WorkflowIDAsString(wf.GetTemplate().GetId()), OnFailure: failureN, Nodes: nodes, Outputs: outputs, @@ -147,13 +147,13 @@ func generateName(wfID *core.Identifier, execID *core.WorkflowExecutionIdentifie name string, generateName string, label string, project string, domain string, err error) { if execID != nil { - return execID.Name, "", execID.Name, execID.Project, execID.Domain, nil + return execID.GetName(), "", execID.GetName(), execID.GetProject(), execID.GetDomain(), nil } else if wfID != nil { - wid := fmt.Sprintf("%v%v%v", withSeparatorIfNotEmpty(wfID.Project), withSeparatorIfNotEmpty(wfID.Domain), wfID.Name) + wid := fmt.Sprintf("%v%v%v", withSeparatorIfNotEmpty(wfID.GetProject()), withSeparatorIfNotEmpty(wfID.GetDomain()), wfID.GetName()) // TODO: this is a hack until we figure out how to restrict generated names. K8s has a limitation of 63 chars wid = wid[:minInt(32, len(wid))] - return "", fmt.Sprintf("%v-", wid), wid, wfID.Project, wfID.Domain, nil + return "", fmt.Sprintf("%v-", wid), wid, wfID.GetProject(), wfID.GetDomain(), nil } else { return "", "", "", "", "", fmt.Errorf("expected param not set. wfID or execID must be non-nil values") } @@ -169,8 +169,8 @@ func BuildFlyteWorkflow(wfClosure *core.CompiledWorkflowClosure, inputs *core.Li return nil, errs } - wf := wfClosure.Primary.Template - tasks := wfClosure.Tasks + wf := wfClosure.GetPrimary().GetTemplate() + tasks := wfClosure.GetTasks() // Fill in inputs in the start node. if inputs != nil { if ok := validateInputs(common.StartNodeID, wf.GetInterface(), *inputs, errs.NewScope()); !ok { @@ -182,22 +182,22 @@ func BuildFlyteWorkflow(wfClosure *core.CompiledWorkflowClosure, inputs *core.Li } for _, t := range tasks { - t.Template.Interface = StripInterfaceTypeMetadata(t.Template.Interface) + t.Template.Interface = StripInterfaceTypeMetadata(t.GetTemplate().GetInterface()) } - primarySpec, err := buildFlyteWorkflowSpec(wfClosure.Primary, tasks, errs.NewScope()) + primarySpec, err := buildFlyteWorkflowSpec(wfClosure.GetPrimary(), tasks, errs.NewScope()) if err != nil { errs.Collect(errors.NewWorkflowBuildError(err)) return nil, errs } - subwfs := make(map[v1alpha1.WorkflowID]*v1alpha1.WorkflowSpec, len(wfClosure.SubWorkflows)) - for _, subWf := range wfClosure.SubWorkflows { + subwfs := make(map[v1alpha1.WorkflowID]*v1alpha1.WorkflowSpec, len(wfClosure.GetSubWorkflows())) + for _, subWf := range wfClosure.GetSubWorkflows() { spec, err := buildFlyteWorkflowSpec(subWf, tasks, errs.NewScope()) if err != nil { errs.Collect(errors.NewWorkflowBuildError(err)) } else { - subwfs[subWf.Template.Id.String()] = spec + subwfs[subWf.GetTemplate().GetId().String()] = spec } } @@ -266,7 +266,7 @@ func BuildFlyteWorkflow(wfClosure *core.CompiledWorkflowClosure, inputs *core.Li func toMapOfLists(connections map[string]*core.ConnectionSet_IdList) map[string][]string { res := make(map[string][]string, len(connections)) for key, val := range connections { - res[key] = val.Ids + res[key] = val.GetIds() } return res @@ -292,24 +292,24 @@ func BuildWfClosureCrdFields(wfClosure *core.CompiledWorkflowClosure) (*WfClosur return nil, errs } - primarySpec, err := buildFlyteWorkflowSpec(wfClosure.Primary, wfClosure.Tasks, errs.NewScope()) + primarySpec, err := buildFlyteWorkflowSpec(wfClosure.GetPrimary(), wfClosure.GetTasks(), 
errs.NewScope()) if err != nil { errs.Collect(errors.NewWorkflowBuildError(err)) return nil, errs } - for _, t := range wfClosure.Tasks { - t.Template.Interface = StripInterfaceTypeMetadata(t.Template.Interface) + for _, t := range wfClosure.GetTasks() { + t.Template.Interface = StripInterfaceTypeMetadata(t.GetTemplate().GetInterface()) } - tasks := buildTasks(wfClosure.Tasks, errs.NewScope()) + tasks := buildTasks(wfClosure.GetTasks(), errs.NewScope()) - subwfs := make(map[v1alpha1.WorkflowID]*v1alpha1.WorkflowSpec, len(wfClosure.SubWorkflows)) - for _, subWf := range wfClosure.SubWorkflows { - spec, err := buildFlyteWorkflowSpec(subWf, wfClosure.Tasks, errs.NewScope()) + subwfs := make(map[v1alpha1.WorkflowID]*v1alpha1.WorkflowSpec, len(wfClosure.GetSubWorkflows())) + for _, subWf := range wfClosure.GetSubWorkflows() { + spec, err := buildFlyteWorkflowSpec(subWf, wfClosure.GetTasks(), errs.NewScope()) if err != nil { errs.Collect(errors.NewWorkflowBuildError(err)) } else { - subwfs[subWf.Template.Id.String()] = spec + subwfs[subWf.GetTemplate().GetId().String()] = spec } } diff --git a/flytepropeller/pkg/compiler/transformers/k8s/workflow_test.go b/flytepropeller/pkg/compiler/transformers/k8s/workflow_test.go index dbb51e25eb..378343ec20 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/workflow_test.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/workflow_test.go @@ -331,10 +331,10 @@ func TestBuildFlyteWorkflow_withBranch(t *testing.T) { w := &core.CompiledWorkflowClosure{} assert.NoError(t, utils.UnmarshalBytesToPb(c, w)) - assert.Len(t, w.Primary.Connections.Downstream, 2) - ids := w.Primary.Connections.Downstream["start-node"] - assert.Len(t, ids.Ids, 1) - assert.Equal(t, ids.Ids[0], "n0") + assert.Len(t, w.GetPrimary().GetConnections().GetDownstream(), 2) + ids := w.GetPrimary().GetConnections().GetDownstream()["start-node"] + assert.Len(t, ids.GetIds(), 1) + assert.Equal(t, ids.GetIds()[0], "n0") wf, err := BuildFlyteWorkflow( w, diff --git a/flytepropeller/pkg/compiler/validators/bindings.go b/flytepropeller/pkg/compiler/validators/bindings.go index b69dda529f..fd317036fa 100644 --- a/flytepropeller/pkg/compiler/validators/bindings.go +++ b/flytepropeller/pkg/compiler/validators/bindings.go @@ -109,7 +109,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding return nil, nil, !errs.HasErrors() } - if upNode, found := validateNodeID(w, val.Promise.NodeId, errs.NewScope()); found { + if upNode, found := validateNodeID(w, val.Promise.GetNodeId(), errs.NewScope()); found { v, err := typing.ParseVarName(val.Promise.GetVar()) if err != nil { errs.Collect(errors.NewSyntaxError(nodeID, val.Promise.GetVar(), err)) @@ -117,28 +117,28 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding } inputVar := nodeParam - outputVar := val.Promise.Var + outputVar := val.Promise.GetVar() if node.GetMetadata() != nil { - inputVar = fmt.Sprintf("%s.%s", node.GetMetadata().Name, nodeParam) + inputVar = fmt.Sprintf("%s.%s", node.GetMetadata().GetName(), nodeParam) } if upNode.GetMetadata() != nil { - outputVar = fmt.Sprintf("%s.%s", upNode.GetMetadata().Name, val.Promise.Var) + outputVar = fmt.Sprintf("%s.%s", upNode.GetMetadata().GetName(), val.Promise.GetVar()) } if param, paramFound := validateOutputVar(upNode, v.Name, errs.NewScope()); paramFound { - sourceType := param.Type + sourceType := param.GetType() // If the variable has an index. We expect param to be a collection. 
if v.Index != nil { if cType := param.GetType().GetCollectionType(); cType == nil { - errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, c.LiteralTypeToStr(param.Type), inputVar, c.LiteralTypeToStr(expectedType))) + errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, c.LiteralTypeToStr(param.GetType()), inputVar, c.LiteralTypeToStr(expectedType))) } else { sourceType = cType } } // If the variable has an attribute path. Extract the type of the last attribute. - for _, attr := range val.Promise.AttrPath { + for _, attr := range val.Promise.GetAttrPath() { var tmpType *flyte.LiteralType var exist bool @@ -152,7 +152,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding if !exist { // the error should output the sourceType instead of tmpType because tmpType is nil - errs.Collect(errors.NewFieldNotFoundErr(nodeID, val.Promise.Var, sourceType.String(), attr.GetStringValue())) + errs.Collect(errors.NewFieldNotFoundErr(nodeID, val.Promise.GetVar(), sourceType.String(), attr.GetStringValue())) return nil, nil, !errs.HasErrors() } sourceType = tmpType @@ -161,7 +161,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding if !validateParamTypes || AreTypesCastable(sourceType, expectedType) { val.Promise.NodeId = upNode.GetId() - return param.GetType(), []c.NodeID{val.Promise.NodeId}, true + return param.GetType(), []c.NodeID{val.Promise.GetNodeId()}, true } errs.Collect(errors.NewMismatchingVariablesErr(node.GetId(), outputVar, c.LiteralTypeToStr(sourceType), inputVar, c.LiteralTypeToStr(expectedType))) @@ -187,14 +187,14 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding v := val.Scalar.GetPrimitive().GetStringValue() // Let us assert that the bound value is a correct enum Value found := false - for _, ev := range expectedType.GetEnumType().Values { + for _, ev := range expectedType.GetEnumType().GetValues() { if ev == v { found = true break } } if !found { - errs.Collect(errors.NewIllegalEnumValueError(nodeID, nodeParam, v, expectedType.GetEnumType().Values)) + errs.Collect(errors.NewIllegalEnumValueError(nodeID, nodeParam, v, expectedType.GetEnumType().GetValues())) } } @@ -237,7 +237,7 @@ func ValidateBindings(w c.WorkflowBuilder, node c.Node, bindings []*flyte.Bindin providedBindings.Insert(binding.GetVar()) if resolvedType, upstreamNodes, bindingOk := validateBinding(w, node, binding.GetVar(), binding.GetBinding(), - param.Type, errs.NewScope(), validateParamTypes); bindingOk { + param.GetType(), errs.NewScope(), validateParamTypes); bindingOk { for _, upNode := range upstreamNodes { // Add implicit Edges switch edgeDirection { @@ -259,7 +259,7 @@ func ValidateBindings(w c.WorkflowBuilder, node c.Node, bindings []*flyte.Bindin // If we missed binding some params, add errors if params != nil { - for paramName, Variable := range params.Variables { + for paramName, Variable := range params.GetVariables() { if !providedBindings.Has(paramName) && !IsOptionalType(*Variable) { errs.Collect(errors.NewParameterNotBoundErr(node.GetId(), paramName)) } @@ -271,10 +271,10 @@ func ValidateBindings(w c.WorkflowBuilder, node c.Node, bindings []*flyte.Bindin // IsOptionalType Return true if there is a None type in Union Type func IsOptionalType(variable flyte.Variable) bool { - if variable.Type.GetUnionType() == nil { + if variable.GetType().GetUnionType() == nil { return false } - for _, variant := range variable.Type.GetUnionType().Variants { + for _, variant := range 
variable.GetType().GetUnionType().GetVariants() { if flyte.SimpleType_NONE == variant.GetSimple() { return true } diff --git a/flytepropeller/pkg/compiler/validators/bindings_test.go b/flytepropeller/pkg/compiler/validators/bindings_test.go index bcb498eebd..e817ba5d5c 100644 --- a/flytepropeller/pkg/compiler/validators/bindings_test.go +++ b/flytepropeller/pkg/compiler/validators/bindings_test.go @@ -24,8 +24,8 @@ func LiteralToBinding(l *core.Literal) *core.BindingData { }, } case *core.Literal_Collection: - x := make([]*core.BindingData, 0, len(l.GetCollection().Literals)) - for _, sub := range l.GetCollection().Literals { + x := make([]*core.BindingData, 0, len(l.GetCollection().GetLiterals())) + for _, sub := range l.GetCollection().GetLiterals() { x = append(x, LiteralToBinding(sub)) } @@ -37,8 +37,8 @@ func LiteralToBinding(l *core.Literal) *core.BindingData { }, } case *core.Literal_Map: - x := make(map[string]*core.BindingData, len(l.GetMap().Literals)) - for key, val := range l.GetMap().Literals { + x := make(map[string]*core.BindingData, len(l.GetMap().GetLiterals())) + for key, val := range l.GetMap().GetLiterals() { x[key] = LiteralToBinding(val) } @@ -63,7 +63,7 @@ func TestValidateBindings(t *testing.T) { compileErrors := compilerErrors.NewCompileErrors() resolved, ok := ValidateBindings(wf, n, bindings, vars, true, c.EdgeDirectionBidirectional, compileErrors) assert.True(t, ok) - assert.Empty(t, resolved.Variables) + assert.Empty(t, resolved.GetVariables()) }) t.Run("Variable not in inputs", func(t *testing.T) { diff --git a/flytepropeller/pkg/compiler/validators/branch.go b/flytepropeller/pkg/compiler/validators/branch.go index 386f1cecda..94e4bea7ad 100644 --- a/flytepropeller/pkg/compiler/validators/branch.go +++ b/flytepropeller/pkg/compiler/validators/branch.go @@ -18,17 +18,17 @@ func validateBranchInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs error return nil, false } - if ifBlock := node.GetBranchNode().IfElse; ifBlock == nil { + if ifBlock := node.GetBranchNode().GetIfElse(); ifBlock == nil { errs.Collect(errors.NewValueRequiredErr(node.GetId(), "Branch.IfElse")) return nil, false } - if ifCase := node.GetBranchNode().IfElse.Case; ifCase == nil { + if ifCase := node.GetBranchNode().GetIfElse().GetCase(); ifCase == nil { errs.Collect(errors.NewValueRequiredErr(node.GetId(), "Branch.IfElse.Case")) return nil, false } - if thenNode := node.GetBranchNode().IfElse.Case.ThenNode; thenNode == nil { + if thenNode := node.GetBranchNode().GetIfElse().GetCase().GetThenNode(); thenNode == nil { errs.Collect(errors.NewValueRequiredErr(node.GetId(), "Branch.IfElse.Case.ThenNode")) return nil, false } @@ -37,33 +37,33 @@ func validateBranchInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs error finalOutputParameterNames := sets.NewString() validateIfaceMatch := func(nodeId string, iface2 *flyte.TypedInterface, errsScope errors.CompileErrors) (match bool) { - outputs2, outputs2Set := buildVariablesIndex(iface2.Outputs) + outputs2, outputs2Set := buildVariablesIndex(iface2.GetOutputs()) // Validate that parameters that exist in both interfaces have compatible types. 
finalOutputParameterNames = finalOutputParameterNames.Intersection(outputs2Set) for paramName := range finalOutputParameterNames { - if validateVarType(nodeId, paramName, outputs[paramName], outputs2[paramName].Type, errs.NewScope()) { - validateVarType(nodeId, paramName, outputs2[paramName], outputs[paramName].Type, errs.NewScope()) + if validateVarType(nodeId, paramName, outputs[paramName], outputs2[paramName].GetType(), errs.NewScope()) { + validateVarType(nodeId, paramName, outputs2[paramName], outputs[paramName].GetType(), errs.NewScope()) } } return !errsScope.HasErrors() } - cases := make([]*flyte.Node, 0, len(node.GetBranchNode().IfElse.Other)+1) - caseBlock := node.GetBranchNode().IfElse.Case - cases = append(cases, caseBlock.ThenNode) + cases := make([]*flyte.Node, 0, len(node.GetBranchNode().GetIfElse().GetOther())+1) + caseBlock := node.GetBranchNode().GetIfElse().GetCase() + cases = append(cases, caseBlock.GetThenNode()) - otherCases := node.GetBranchNode().IfElse.Other + otherCases := node.GetBranchNode().GetIfElse().GetOther() for _, otherCase := range otherCases { - if otherCase.ThenNode == nil { + if otherCase.GetThenNode() == nil { errs.Collect(errors.NewValueRequiredErr(node.GetId(), "IfElse.Case.ThenNode")) continue } - cases = append(cases, otherCase.ThenNode) + cases = append(cases, otherCase.GetThenNode()) } - if elseNode := node.GetBranchNode().IfElse.GetElseNode(); elseNode != nil { + if elseNode := node.GetBranchNode().GetIfElse().GetElseNode(); elseNode != nil { cases = append(cases, elseNode) } @@ -79,12 +79,12 @@ func validateBranchInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs error // match. We will pull the inputs needed for the underlying branch node at runtime. iface2 = &flyte.TypedInterface{ Inputs: &flyte.VariableMap{Variables: map[string]*flyte.Variable{}}, - Outputs: iface2.Outputs, + Outputs: iface2.GetOutputs(), } if iface == nil { iface = iface2 - outputs, finalOutputParameterNames = buildVariablesIndex(iface.Outputs) + outputs, finalOutputParameterNames = buildVariablesIndex(iface.GetOutputs()) } else { validateIfaceMatch(n.GetId(), iface2, errs.NewScope()) } @@ -99,7 +99,7 @@ func validateBranchInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs error if !errs.HasErrors() && iface != nil { iface = &flyte.TypedInterface{ Inputs: inputVarsFromBindings, - Outputs: filterVariables(iface.Outputs, finalOutputParameterNames), + Outputs: filterVariables(iface.GetOutputs(), finalOutputParameterNames), } } else { iface = nil diff --git a/flytepropeller/pkg/compiler/validators/condition.go b/flytepropeller/pkg/compiler/validators/condition.go index 70b72cde8a..c402040135 100644 --- a/flytepropeller/pkg/compiler/validators/condition.go +++ b/flytepropeller/pkg/compiler/validators/condition.go @@ -48,8 +48,8 @@ func ValidateBooleanExpression(w c.WorkflowBuilder, node c.NodeBuilder, expr *fl } } } else if expr.GetConjunction() != nil { - ValidateBooleanExpression(w, node, expr.GetConjunction().LeftExpression, requireParamType, errs.NewScope()) - ValidateBooleanExpression(w, node, expr.GetConjunction().RightExpression, requireParamType, errs.NewScope()) + ValidateBooleanExpression(w, node, expr.GetConjunction().GetLeftExpression(), requireParamType, errs.NewScope()) + ValidateBooleanExpression(w, node, expr.GetConjunction().GetRightExpression(), requireParamType, errs.NewScope()) } else { errs.Collect(errors.NewValueRequiredErr(node.GetId(), "Expr")) } diff --git a/flytepropeller/pkg/compiler/validators/interface.go 
b/flytepropeller/pkg/compiler/validators/interface.go index a71c52e49a..fe22a9fb48 100644 --- a/flytepropeller/pkg/compiler/validators/interface.go +++ b/flytepropeller/pkg/compiler/validators/interface.go @@ -17,14 +17,14 @@ func ValidateInterface(nodeID c.NodeID, iface *core.TypedInterface, errs errors. } // validate InputsRef/OutputsRef parameters required attributes are set - if iface.Inputs != nil && iface.Inputs.Variables != nil { - validateVariables(nodeID, iface.Inputs, errs.NewScope()) + if iface.GetInputs() != nil && iface.Inputs.Variables != nil { + validateVariables(nodeID, iface.GetInputs(), errs.NewScope()) } else { iface.Inputs = &core.VariableMap{Variables: map[string]*core.Variable{}} } - if iface.Outputs != nil && iface.Outputs.Variables != nil { - validateVariables(nodeID, iface.Outputs, errs.NewScope()) + if iface.GetOutputs() != nil && iface.Outputs.Variables != nil { + validateVariables(nodeID, iface.GetOutputs(), errs.NewScope()) } else { iface.Outputs = &core.VariableMap{Variables: map[string]*core.Variable{}} } @@ -55,8 +55,8 @@ func ValidateUnderlyingInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs e errs.Collect(errors.NewTaskReferenceNotFoundErr(node.GetId(), node.GetTaskNode().GetReferenceId().String())) } case *core.Node_WorkflowNode: - if node.GetWorkflowNode().GetLaunchplanRef().String() == w.GetCoreWorkflow().Template.Id.String() { - iface = w.GetCoreWorkflow().Template.Interface + if node.GetWorkflowNode().GetLaunchplanRef().String() == w.GetCoreWorkflow().GetTemplate().GetId().String() { + iface = w.GetCoreWorkflow().GetTemplate().GetInterface() if iface == nil { errs.Collect(errors.NewValueRequiredErr(node.GetId(), "WorkflowNode.Interface")) } @@ -75,11 +75,11 @@ func ValidateUnderlyingInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs e // Compute exposed inputs as the union of all required inputs and any input overwritten by the node. exposedInputs := map[string]*core.Variable{} if inputs != nil && inputs.Parameters != nil { - for name, p := range inputs.Parameters { + for name, p := range inputs.GetParameters() { if p.GetRequired() { - exposedInputs[name] = p.Var + exposedInputs[name] = p.GetVar() } else if containsBindingByVariableName(node.GetInputs(), name) { - exposedInputs[name] = p.Var + exposedInputs[name] = p.GetVar() } // else, the param has a default value and is not being overwritten by the node } @@ -98,10 +98,10 @@ func ValidateUnderlyingInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs e } } else if node.GetWorkflowNode().GetSubWorkflowRef() != nil { if wf, wfOk := w.GetSubWorkflow(node.GetWorkflowNode().GetSubWorkflowRef()); wfOk { - if wf.Template == nil { + if wf.GetTemplate() == nil { errs.Collect(errors.NewValueRequiredErr(node.GetId(), "WorkflowNode.Template")) } else { - iface = wf.Template.Interface + iface = wf.GetTemplate().GetInterface() if iface == nil { errs.Collect(errors.NewValueRequiredErr(node.GetId(), "WorkflowNode.Template.Interface")) } @@ -155,7 +155,7 @@ func ValidateUnderlyingInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs e } case *core.Node_ArrayNode: arrayNode := node.GetArrayNode() - underlyingNodeBuilder := w.GetOrCreateNodeBuilder(arrayNode.Node) + underlyingNodeBuilder := w.GetOrCreateNodeBuilder(arrayNode.GetNode()) if underlyingIface, ok := ValidateUnderlyingInterface(w, underlyingNodeBuilder, errs.NewScope()); ok { // ArrayNode interface should be inferred from the underlying node interface. flytekit // will correct wrap variables in collections as needed, leaving partials as is. 
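For reference, the computeRetryStrategy hunk in flytepropeller/pkg/compiler/transformers/k8s/utils.go earlier in this diff encodes two rules: node-level retries take precedence over task-level retries, and a retry count of n becomes n+1 minimum attempts. The standalone sketch below paraphrases that logic with simplified stand-in types (RetryStrategy, NodeMeta and TaskMeta are assumptions for illustration, not the real core or v1alpha1 types).

package main

import "fmt"

// Simplified stand-ins for the node/task metadata and the retry strategy.
type RetryStrategy struct{ MinAttempts *int }

type NodeMeta struct{ Retries uint32 }
type TaskMeta struct{ Retries uint32 }

func refInt(i int) *int { return &i }

// Node-level retries win over task-level retries; "n retries" becomes
// "n+1 attempts", and zero retries on both sides yields no strategy at all.
func computeRetryStrategy(node *NodeMeta, task *TaskMeta) *RetryStrategy {
	if node != nil && node.Retries != 0 {
		return &RetryStrategy{MinAttempts: refInt(int(node.Retries) + 1)}
	}
	if task != nil && task.Retries != 0 {
		return &RetryStrategy{MinAttempts: refInt(int(task.Retries) + 1)}
	}
	return nil
}

func main() {
	r := computeRetryStrategy(&NodeMeta{Retries: 1}, &TaskMeta{Retries: 5})
	fmt.Println(*r.MinAttempts) // 2: the node's single retry overrides the task's five

	fmt.Println(computeRetryStrategy(&NodeMeta{}, &TaskMeta{}) == nil) // true
}

This is also what the updated table test in utils_test.go exercises: the "node-only" case with nodeRetries=1, taskRetries=0 expects 2 minimum attempts.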
diff --git a/flytepropeller/pkg/compiler/validators/interface_test.go b/flytepropeller/pkg/compiler/validators/interface_test.go index 85c031c0a7..ba987bda62 100644 --- a/flytepropeller/pkg/compiler/validators/interface_test.go +++ b/flytepropeller/pkg/compiler/validators/interface_test.go @@ -66,10 +66,10 @@ func assertNonEmptyInterface(t testing.TB, iface *core.TypedInterface, ifaceOk b t.Fatal(errs) } - assert.NotNil(t, iface.Inputs) - assert.NotNil(t, iface.Inputs.Variables) - assert.NotNil(t, iface.Outputs) - assert.NotNil(t, iface.Outputs.Variables) + assert.NotNil(t, iface.GetInputs()) + assert.NotNil(t, iface.GetInputs().GetVariables()) + assert.NotNil(t, iface.GetOutputs()) + assert.NotNil(t, iface.GetOutputs().GetVariables()) } func TestValidateUnderlyingInterface(t *testing.T) { @@ -419,8 +419,8 @@ func TestValidateUnderlyingInterface(t *testing.T) { taskNodeBuilder := &mocks.NodeBuilder{} taskNodeBuilder.On("GetCoreNode").Return(taskNode) - taskNodeBuilder.On("GetId").Return(taskNode.Id) - taskNodeBuilder.On("GetTaskNode").Return(taskNode.Target.(*core.Node_TaskNode).TaskNode) + taskNodeBuilder.On("GetId").Return(taskNode.GetId()) + taskNodeBuilder.On("GetTaskNode").Return(taskNode.GetTarget().(*core.Node_TaskNode).TaskNode) taskNodeBuilder.On("GetInterface").Return(nil) taskNodeBuilder.On("SetInterface", mock.AnythingOfType("*core.TypedInterface")).Return(nil) @@ -431,7 +431,7 @@ func TestValidateUnderlyingInterface(t *testing.T) { }).String() })).Return(&task, true) wfBuilder.On("GetOrCreateNodeBuilder", mock.MatchedBy(func(node *core.Node) bool { - return node.Id == "node_1" + return node.GetId() == "node_1" })).Return(taskNodeBuilder) // mock array node @@ -445,9 +445,9 @@ func TestValidateUnderlyingInterface(t *testing.T) { } nodeBuilder := mocks.NodeBuilder{} - nodeBuilder.On("GetArrayNode").Return(arrayNode.Target.(*core.Node_ArrayNode).ArrayNode) + nodeBuilder.On("GetArrayNode").Return(arrayNode.GetTarget().(*core.Node_ArrayNode).ArrayNode) nodeBuilder.On("GetCoreNode").Return(arrayNode) - nodeBuilder.On("GetId").Return(arrayNode.Id) + nodeBuilder.On("GetId").Return(arrayNode.GetId()) nodeBuilder.On("GetInterface").Return(nil) nodeBuilder.On("SetInterface", mock.Anything).Return() diff --git a/flytepropeller/pkg/compiler/validators/node.go b/flytepropeller/pkg/compiler/validators/node.go index ad43abdce3..1b8b97ae12 100644 --- a/flytepropeller/pkg/compiler/validators/node.go +++ b/flytepropeller/pkg/compiler/validators/node.go @@ -15,19 +15,19 @@ func validateEffectiveOutputParameters(n c.NodeBuilder, errs errors.CompileError params *flyte.VariableMap, ok bool) { aliases := make(map[string]string, len(n.GetOutputAliases())) for _, alias := range n.GetOutputAliases() { - if _, found := aliases[alias.Var]; found { - errs.Collect(errors.NewDuplicateAliasErr(n.GetId(), alias.Alias)) + if _, found := aliases[alias.GetVar()]; found { + errs.Collect(errors.NewDuplicateAliasErr(n.GetId(), alias.GetAlias())) } else { - aliases[alias.Var] = alias.Alias + aliases[alias.GetVar()] = alias.GetAlias() } } if n.GetInterface() != nil { params = &flyte.VariableMap{ - Variables: make(map[string]*flyte.Variable, len(n.GetInterface().GetOutputs().Variables)), + Variables: make(map[string]*flyte.Variable, len(n.GetInterface().GetOutputs().GetVariables())), } - for paramName, param := range n.GetInterface().GetOutputs().Variables { + for paramName, param := range n.GetInterface().GetOutputs().GetVariables() { if alias, found := aliases[paramName]; found { if newParam, paramOk := 
withVariableName(param); paramOk { params.Variables[alias] = newParam @@ -57,19 +57,19 @@ func branchNodeIDFormatter(parentNodeID, thenNodeID string) string { func ValidateBranchNode(w c.WorkflowBuilder, n c.NodeBuilder, requireParamType bool, errs errors.CompileErrors) ( discoveredNodes []c.NodeBuilder, ok bool) { - cases := make([]*flyte.IfBlock, 0, len(n.GetBranchNode().IfElse.Other)+1) - if n.GetBranchNode().IfElse.Case == nil { + cases := make([]*flyte.IfBlock, 0, len(n.GetBranchNode().GetIfElse().GetOther())+1) + if n.GetBranchNode().GetIfElse().GetCase() == nil { errs.Collect(errors.NewBranchNodeHasNoCondition(n.GetId())) } else { - cases = append(cases, n.GetBranchNode().IfElse.Case) + cases = append(cases, n.GetBranchNode().GetIfElse().GetCase()) } - cases = append(cases, n.GetBranchNode().IfElse.Other...) + cases = append(cases, n.GetBranchNode().GetIfElse().GetOther()...) discoveredNodes = make([]c.NodeBuilder, 0, len(cases)) subNodes := make([]c.NodeBuilder, 0, len(cases)+1) for _, block := range cases { // Validate condition - ValidateBooleanExpression(w, n, block.Condition, requireParamType, errs.NewScope()) + ValidateBooleanExpression(w, n, block.GetCondition(), requireParamType, errs.NewScope()) if block.GetThenNode() == nil { errs.Collect(errors.NewBranchNodeNotSpecified(n.GetId())) @@ -79,10 +79,10 @@ func ValidateBranchNode(w c.WorkflowBuilder, n c.NodeBuilder, requireParamType b } } - if elseNode := n.GetBranchNode().IfElse.GetElseNode(); elseNode != nil { + if elseNode := n.GetBranchNode().GetIfElse().GetElseNode(); elseNode != nil { wrapperNode := w.GetOrCreateNodeBuilder(elseNode) subNodes = append(subNodes, wrapperNode) - } else if defaultElse := n.GetBranchNode().IfElse.GetDefault(); defaultElse == nil { + } else if defaultElse := n.GetBranchNode().GetIfElse().GetDefault(); defaultElse == nil { errs.Collect(errors.NewBranchNodeHasNoDefault(n.GetId())) } @@ -126,7 +126,7 @@ func ValidateNode(w c.WorkflowBuilder, n c.NodeBuilder, validateConditionTypes b } // Order upstream node ids to ensure consistent output of the compiler even if client ordering changes. - sort.Strings(n.GetCoreNode().UpstreamNodeIds) + sort.Strings(n.GetCoreNode().GetUpstreamNodeIds()) // Validate branch node conditions and inner nodes. 
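// Roughly, the branch structure validated here has this shape (illustrative, hypothetical
// cond/thenNode/elifNode values):
//
//	&core.BranchNode{IfElse: &core.IfElseBlock{
//		Case:    &core.IfBlock{Condition: cond, ThenNode: thenNode},       // required
//		Other:   []*core.IfBlock{{Condition: cond2, ThenNode: elifNode}},  // optional elif blocks
//		Default: &core.IfElseBlock_Error{Error: &core.Error{Message: "no branch taken"}}, // or an ElseNode
//	}}
//
// ValidateBranchNode collects the required Case plus any Other blocks, validates each
// Condition, and requires either an ElseNode or a Default error to be present.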
if n.GetBranchNode() != nil { diff --git a/flytepropeller/pkg/compiler/validators/node_test.go b/flytepropeller/pkg/compiler/validators/node_test.go index 3982b71344..642f568593 100644 --- a/flytepropeller/pkg/compiler/validators/node_test.go +++ b/flytepropeller/pkg/compiler/validators/node_test.go @@ -64,7 +64,7 @@ func TestValidateNode(t *testing.T) { coreN.UpstreamNodeIds = []string{"n1", "n0"} n.OnGetCoreNode().Return(coreN) n.On("GetUpstreamNodeIds").Return(func() []string { - return coreN.UpstreamNodeIds + return coreN.GetUpstreamNodeIds() }) wf := &mocks.WorkflowBuilder{} diff --git a/flytepropeller/pkg/compiler/validators/typing.go b/flytepropeller/pkg/compiler/validators/typing.go index 2bde60b47b..ca1ca03148 100644 --- a/flytepropeller/pkg/compiler/validators/typing.go +++ b/flytepropeller/pkg/compiler/validators/typing.go @@ -1,11 +1,18 @@ package validators import ( + "bytes" + "context" + "encoding/json" "strings" structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/santhosh-tekuri/jsonschema" + "github.com/wI2L/jsondiff" + jscmp "gitlab.com/yvesf/json-schema-compare" flyte "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytestdlib/logger" ) type typeChecker interface { @@ -16,6 +23,85 @@ type trivialChecker struct { literalType *flyte.LiteralType } +func isSuperTypeInJSON(sourceMetaData, targetMetaData *structpb.Struct) bool { + // Check if the source schema is a supertype of the target schema, beyond simple inheritance. + // For custom types, we expect the JSON schemas in the metadata to come from the same JSON schema package, + // specifically draft 2020-12 from Mashumaro. + + srcSchemaBytes, err := json.Marshal(sourceMetaData.GetFields()) + if err != nil { + logger.Infof(context.Background(), "Failed to marshal source metadata: [%v]", err) + return false + } + tgtSchemaBytes, err := json.Marshal(targetMetaData.GetFields()) + if err != nil { + logger.Infof(context.Background(), "Failed to marshal target metadata: [%v]", err) + return false + } + + compiler := jsonschema.NewCompiler() + + err = compiler.AddResource("src", bytes.NewReader(srcSchemaBytes)) + if err != nil { + logger.Infof(context.Background(), "Failed to add resource to compiler: [%v]", err) + return false + } + err = compiler.AddResource("tgt", bytes.NewReader(tgtSchemaBytes)) + if err != nil { + logger.Infof(context.Background(), "Failed to add resource to compiler: [%v]", err) + return false + } + + srcSchema, err := compiler.Compile("src") + if err != nil { + logger.Infof(context.Background(), "Failed to compile source schema: [%v]", err) + return false + } + tgtSchema, err := compiler.Compile("tgt") + if err != nil { + logger.Infof(context.Background(), "Failed to compile target schema: [%v]", err) + return false + } + + // Compare the two schemas + errs := jscmp.Compare(tgtSchema, srcSchema) + + // Ignore the "not implemented" errors from json-schema-compare (additionalProperties, additionalItems, etc.) + // While handling nested structures, we might have multiple "not implemented" errors for a single field as well. + // If all the errors are "not implemented", we can consider the source schema as a supertype of the target schema. 
+ for _, err := range errs { + if !strings.Contains(err.Error(), "not implemented") { + return false + } + } + + return true +} + +func isSameTypeInJSON(sourceMetaData, targetMetaData *structpb.Struct) bool { + srcSchemaBytes, err := json.Marshal(sourceMetaData.GetFields()) + if err != nil { + logger.Infof(context.Background(), "Failed to marshal source metadata: [%v]", err) + return false + } + + tgtSchemaBytes, err := json.Marshal(targetMetaData.GetFields()) + if err != nil { + logger.Infof(context.Background(), "Failed to marshal target metadata: [%v]", err) + return false + } + + // Use jsondiff to compare the two schemas + patch, err := jsondiff.CompareJSON(srcSchemaBytes, tgtSchemaBytes) + if err != nil { + logger.Infof(context.Background(), "Failed to compare JSON schemas: [%v]", err) + return false + } + + // If the length of the patch is zero, the two JSON structures are identical + return len(patch) == 0 +} + // CastsFrom is a trivial type checker merely checks if types match exactly. func (t trivialChecker) CastsFrom(upstreamType *flyte.LiteralType) bool { // If upstream is an enum, it can be consumed as a string downstream @@ -35,6 +121,23 @@ func (t trivialChecker) CastsFrom(upstreamType *flyte.LiteralType) bool { return false } + // Related Issue: https://github.com/flyteorg/flyte/issues/5489 + // RFC: https://github.com/flyteorg/flyte/blob/master/rfc/system/5741-binary-idl-with-message-pack.md#flytepropeller + if upstreamType.GetSimple() == flyte.SimpleType_STRUCT && t.literalType.GetSimple() == flyte.SimpleType_STRUCT { + // The JSON Schema is stored in the LiteralType's Metadata + upstreamMetaData := upstreamType.GetMetadata() + downstreamMetaData := t.literalType.GetMetadata() + + // Earlier flytekit versions had a bug in the dataclass Transformer that could leave the + // JSON Schema nil. When either side is missing a schema we let the cast pass to stay + // backward compatible (referenced tasks should still be supported). + if upstreamMetaData == nil || downstreamMetaData == nil { + return true + } + + return isSameTypeInJSON(upstreamMetaData, downstreamMetaData) || isSuperTypeInJSON(upstreamMetaData, downstreamMetaData) + } + + // Ignore metadata when comparing types.
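// A compact illustration of the new STRUCT-to-STRUCT rule (sketch only; childSchema and
// parentSchema stand in for draft 2020-12 JSON schemas carried in LiteralType.Metadata):
//
//	child := &flyte.LiteralType{Type: &flyte.LiteralType_Simple{Simple: flyte.SimpleType_STRUCT}, Metadata: childSchema}   // e.g. dataclass B(A)
//	parent := &flyte.LiteralType{Type: &flyte.LiteralType_Simple{Simple: flyte.SimpleType_STRUCT}, Metadata: parentSchema} // e.g. dataclass A
//	AreTypesCastable(child, parent) // true: a value of the child type can be consumed where the parent is expected
//	AreTypesCastable(parent, child) // false: the parent schema does not satisfy the child's required fields
//
// The typing_test.go cases added further down exercise exactly these combinations.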
upstreamTypeCopy := *upstreamType downstreamTypeCopy := *t.literalType @@ -136,7 +239,7 @@ func (t schemaTypeChecker) CastsFrom(upstreamType *flyte.LiteralType) bool { } // Flyte Schema can only be serialized to parquet - if len(structuredDatasetType.Format) != 0 && !strings.EqualFold(structuredDatasetType.Format, "parquet") { + if len(structuredDatasetType.GetFormat()) != 0 && !strings.EqualFold(structuredDatasetType.GetFormat(), "parquet") { return false } @@ -168,7 +271,7 @@ func (t structuredDatasetChecker) CastsFrom(upstreamType *flyte.LiteralType) boo } if schemaType != nil { // Flyte Schema can only be serialized to parquet - format := t.literalType.GetStructuredDatasetType().Format + format := t.literalType.GetStructuredDatasetType().GetFormat() if len(format) != 0 && !strings.EqualFold(format, "parquet") { return false } @@ -179,22 +282,22 @@ func (t structuredDatasetChecker) CastsFrom(upstreamType *flyte.LiteralType) boo // Upstream (schema) -> downstream (schema) func schemaCastFromSchema(upstream *flyte.SchemaType, downstream *flyte.SchemaType) bool { - if len(upstream.Columns) == 0 || len(downstream.Columns) == 0 { + if len(upstream.GetColumns()) == 0 || len(downstream.GetColumns()) == 0 { return true } nameToTypeMap := make(map[string]flyte.SchemaType_SchemaColumn_SchemaColumnType) - for _, column := range upstream.Columns { - nameToTypeMap[column.Name] = column.Type + for _, column := range upstream.GetColumns() { + nameToTypeMap[column.GetName()] = column.GetType() } // Check that the downstream schema is a strict sub-set of the upstream schema. - for _, column := range downstream.Columns { - upstreamType, ok := nameToTypeMap[column.Name] + for _, column := range downstream.GetColumns() { + upstreamType, ok := nameToTypeMap[column.GetName()] if !ok { return false } - if upstreamType != column.Type { + if upstreamType != column.GetType() { return false } } @@ -244,26 +347,26 @@ func (t unionTypeChecker) CastsFrom(upstreamType *flyte.LiteralType) bool { // Upstream (structuredDatasetType) -> downstream (structuredDatasetType) func structuredDatasetCastFromStructuredDataset(upstream *flyte.StructuredDatasetType, downstream *flyte.StructuredDatasetType) bool { // Skip the format check here when format is empty. https://github.com/flyteorg/flyte/issues/2864 - if len(upstream.Format) != 0 && len(downstream.Format) != 0 && !strings.EqualFold(upstream.Format, downstream.Format) { + if len(upstream.GetFormat()) != 0 && len(downstream.GetFormat()) != 0 && !strings.EqualFold(upstream.GetFormat(), downstream.GetFormat()) { return false } - if len(upstream.Columns) == 0 || len(downstream.Columns) == 0 { + if len(upstream.GetColumns()) == 0 || len(downstream.GetColumns()) == 0 { return true } nameToTypeMap := make(map[string]*flyte.LiteralType) - for _, column := range upstream.Columns { - nameToTypeMap[column.Name] = column.LiteralType + for _, column := range upstream.GetColumns() { + nameToTypeMap[column.GetName()] = column.GetLiteralType() } // Check that the downstream structured dataset is a strict sub-set of the upstream structured dataset. 
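// For example (illustrative only): an upstream structured dataset with columns
// {a: integer, b: string} can feed a downstream dataset that declares only {a: integer},
// but a downstream dataset that also requires {c: float} cannot be satisfied.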
- for _, column := range downstream.Columns { - upstreamType, ok := nameToTypeMap[column.Name] + for _, column := range downstream.GetColumns() { + upstreamType, ok := nameToTypeMap[column.GetName()] if !ok { return false } - if !getTypeChecker(column.LiteralType).CastsFrom(upstreamType) { + if !getTypeChecker(column.GetLiteralType()).CastsFrom(upstreamType) { return false } } @@ -272,21 +375,21 @@ func structuredDatasetCastFromStructuredDataset(upstream *flyte.StructuredDatase // Upstream (schemaType) -> downstream (structuredDatasetType) func structuredDatasetCastFromSchema(upstream *flyte.SchemaType, downstream *flyte.StructuredDatasetType) bool { - if len(upstream.Columns) == 0 || len(downstream.Columns) == 0 { + if len(upstream.GetColumns()) == 0 || len(downstream.GetColumns()) == 0 { return true } nameToTypeMap := make(map[string]flyte.SchemaType_SchemaColumn_SchemaColumnType) - for _, column := range upstream.Columns { - nameToTypeMap[column.Name] = column.GetType() + for _, column := range upstream.GetColumns() { + nameToTypeMap[column.GetName()] = column.GetType() } // Check that the downstream structuredDataset is a strict sub-set of the upstream schema. - for _, column := range downstream.Columns { - upstreamType, ok := nameToTypeMap[column.Name] + for _, column := range downstream.GetColumns() { + upstreamType, ok := nameToTypeMap[column.GetName()] if !ok { return false } - if !schemaTypeIsMatchStructuredDatasetType(upstreamType, column.LiteralType.GetSimple()) { + if !schemaTypeIsMatchStructuredDatasetType(upstreamType, column.GetLiteralType().GetSimple()) { return false } } @@ -295,17 +398,17 @@ func structuredDatasetCastFromSchema(upstream *flyte.SchemaType, downstream *fly // Upstream (structuredDatasetType) -> downstream (schemaType) func schemaCastFromStructuredDataset(upstream *flyte.StructuredDatasetType, downstream *flyte.SchemaType) bool { - if len(upstream.Columns) == 0 || len(downstream.Columns) == 0 { + if len(upstream.GetColumns()) == 0 || len(downstream.GetColumns()) == 0 { return true } nameToTypeMap := make(map[string]flyte.SimpleType) - for _, column := range upstream.Columns { - nameToTypeMap[column.Name] = column.LiteralType.GetSimple() + for _, column := range upstream.GetColumns() { + nameToTypeMap[column.GetName()] = column.GetLiteralType().GetSimple() } // Check that the downstream schema is a strict sub-set of the upstream structuredDataset. 
- for _, column := range downstream.Columns { - upstreamType, ok := nameToTypeMap[column.Name] + for _, column := range downstream.GetColumns() { + upstreamType, ok := nameToTypeMap[column.GetName()] if !ok { return false } diff --git a/flytepropeller/pkg/compiler/validators/typing_test.go b/flytepropeller/pkg/compiler/validators/typing_test.go index f2e407b986..2f5bc5531d 100644 --- a/flytepropeller/pkg/compiler/validators/typing_test.go +++ b/flytepropeller/pkg/compiler/validators/typing_test.go @@ -5,6 +5,7 @@ import ( structpb "github.com/golang/protobuf/ptypes/struct" "github.com/stretchr/testify/assert" + structpb2 "google.golang.org/protobuf/types/known/structpb" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" ) @@ -557,6 +558,837 @@ func TestMapCasting(t *testing.T) { assert.True(t, castable, "castable from Struct to struct") }) + t.Run("SameDataclassOneLevel(draft 2020-12)", func(t *testing.T) { + /* + @dataclass + class A: + a: int + */ + castable := AreTypesCastable( + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + ) + assert.True(t, castable, "same dataclass castable with one level properties") + }) + + t.Run("SameDataclassTwoLevel(draft 2020-12)", func(t *testing.T) { + /* + @dataclass + class A: + a: int + + @dataclass + class B: + b: A + */ + + castable := AreTypesCastable( + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + 
Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "b"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "B"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "b": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "required": { + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": { + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "b"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "B"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "b": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "required": { + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": { + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + ) + assert.True(t, castable, "same dataclass castable with two level properties") + }) + + t.Run("DiffDataclassTwoLevel(draft 2020-12)", func(t *testing.T) { + /* + @dataclass + class A: + a: int + + @dataclass + class B: + b: A + + @dataclass + class C: + c: str + + @dataclass + class D: + d: C + + Compare B and D + */ + + castable := AreTypesCastable( + &core.LiteralType{ + Type: 
&core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "b"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "B"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "b": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "required": { + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": { + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "d"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "D"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "d": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "required": { + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "c"}}}, + }, + }, + }, + "title": { + Kind: &structpb.Value_StringValue{StringValue: "C"}, + }, + "properties": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "c": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "string"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + ) + assert.False(t, castable, "different dataclass with two level properties not castable") + }) + + t.Run("SameBaseModelOneLevel(draft 2020-12)", func(t *testing.T) { 
+ /* + class A(BaseModel): + a: int + */ + castable := AreTypesCastable( + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + }, + }, + }, + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + }, + }, + }, + ) + assert.True(t, castable, "same basemodel castable with one level properties") + }) + + t.Run("BigToSmallAndChildToParent(dataclass draft 2020-12)", func(t *testing.T) { + /* + @dataclass + class A: + a: int + + @dataclass + class B(A): + b: Optional[str] = None + */ + castable := AreTypesCastable( + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{ + {Kind: &structpb.Value_StringValue{StringValue: "a"}}, + {Kind: &structpb.Value_StringValue{StringValue: "b"}}, + }, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "B"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + "b": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "default": { + Kind: &structpb.Value_NullValue{}, + }, + "anyOf": { + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{ + { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "string"}, + 
}, + }, + }, + }, + }, + { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "null"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + ) + assert.True(t, castable, "dataclass castable from child to parent (big to small)") + }) + + t.Run("SmallToBigAndParentToChild(dataclass draft 2020-12)", func(t *testing.T) { + /* + @dataclass + class A: + a: int + + @dataclass + class B(A): + b: Optional[str] = None + */ + castable := AreTypesCastable( + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{{Kind: &structpb.Value_StringValue{StringValue: "a"}}}, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "A"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + Metadata: &structpb.Struct{ + Fields: map[string]*structpb2.Value{ + "required": &structpb.Value{ + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{ + {Kind: &structpb.Value_StringValue{StringValue: "a"}}, + {Kind: &structpb.Value_StringValue{StringValue: "b"}}, + }, + }, + }, + }, + "title": &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: "B"}, + }, + "properties": &structpb.Value{ + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: 
map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "integer"}, + }, + }, + }, + }, + }, + "b": { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "default": { + Kind: &structpb.Value_NullValue{}, + }, + "anyOf": { + Kind: &structpb.Value_ListValue{ + ListValue: &structpb.ListValue{ + Values: []*structpb.Value{ + { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "string"}, + }, + }, + }, + }, + }, + { + Kind: &structpb.Value_StructValue{ + StructValue: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "type": { + Kind: &structpb.Value_StringValue{StringValue: "null"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "type": { + Kind: &structpb.Value_StringValue{StringValue: "object"}, + }, + "AdditionalProperties": { + Kind: &structpb.Value_BoolValue{BoolValue: false}, + }, + }, + }, + }, + ) + assert.False(t, castable, "dataclass not castable from parent to child (small to big)") + }) + t.Run("MismatchedMapNestLevels_Scalar", func(t *testing.T) { castable := AreTypesCastable( &core.LiteralType{ diff --git a/flytepropeller/pkg/compiler/validators/utils.go b/flytepropeller/pkg/compiler/validators/utils.go index cbb14b3124..e8f0089c14 100644 --- a/flytepropeller/pkg/compiler/validators/utils.go +++ b/flytepropeller/pkg/compiler/validators/utils.go @@ -14,7 +14,7 @@ import ( func containsBindingByVariableName(bindings []*core.Binding, name string) (found bool) { for _, b := range bindings { - if b.Var == name { + if b.GetVar() == name { return true } } @@ -27,7 +27,7 @@ func findVariableByName(vars *core.VariableMap, name string) (variable *core.Var return nil, false } - variable, found = vars.Variables[name] + variable, found = vars.GetVariables()[name] return } @@ -48,7 +48,7 @@ func literalTypeForScalar(scalar *core.Scalar) *core.LiteralType { // If the binary has a tag, treat it as a structured type (e.g., dict, dataclass, Pydantic BaseModel). // Otherwise, treat it as raw binary data. 
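// For example (illustrative only): a Binary scalar whose Tag equals coreutils.MESSAGEPACK
// is typed as SimpleType_STRUCT, while an untagged Binary scalar falls back to SimpleType_BINARY.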
// Reference: https://github.com/flyteorg/flyte/blob/master/rfc/system/5741-binary-idl-with-message-pack.md - if v.Binary.Tag == coreutils.MESSAGEPACK { + if v.Binary.GetTag() == coreutils.MESSAGEPACK { literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}} } else { literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_BINARY}} @@ -56,11 +56,11 @@ func literalTypeForScalar(scalar *core.Scalar) *core.LiteralType { case *core.Scalar_Schema: literalType = &core.LiteralType{ Type: &core.LiteralType_Schema{ - Schema: scalar.GetSchema().Type, + Schema: scalar.GetSchema().GetType(), }, } case *core.Scalar_StructuredDataset: - if v.StructuredDataset == nil || v.StructuredDataset.Metadata == nil { + if v.StructuredDataset == nil || v.StructuredDataset.GetMetadata() == nil { return &core.LiteralType{ Type: &core.LiteralType_StructuredDatasetType{}, } @@ -68,7 +68,7 @@ func literalTypeForScalar(scalar *core.Scalar) *core.LiteralType { literalType = &core.LiteralType{ Type: &core.LiteralType_StructuredDatasetType{ - StructuredDatasetType: scalar.GetStructuredDataset().GetMetadata().StructuredDatasetType, + StructuredDatasetType: scalar.GetStructuredDataset().GetMetadata().GetStructuredDatasetType(), }, } case *core.Scalar_NoneType: @@ -115,9 +115,9 @@ func literalTypeForPrimitive(primitive *core.Primitive) *core.LiteralType { } func buildVariablesIndex(params *core.VariableMap) (map[string]*core.Variable, sets.String) { - paramMap := make(map[string]*core.Variable, len(params.Variables)) + paramMap := make(map[string]*core.Variable, len(params.GetVariables())) paramSet := sets.NewString() - for paramName, param := range params.Variables { + for paramName, param := range params.GetVariables() { paramMap[paramName] = param paramSet.Insert(paramName) } @@ -130,7 +130,7 @@ func filterVariables(vars *core.VariableMap, varNames sets.String) *core.Variabl Variables: make(map[string]*core.Variable, len(varNames)), } - for paramName, param := range vars.Variables { + for paramName, param := range vars.GetVariables() { if varNames.Has(paramName) { res.Variables[paramName] = param } @@ -158,9 +158,9 @@ func UnionDistinctVariableMaps(m1, m2 map[string]*core.Variable) (map[string]*co for k, v := range m2 { if existingV, exists := res[k]; exists { - if v.Type.String() != existingV.Type.String() { + if v.GetType().String() != existingV.GetType().String() { return nil, fmt.Errorf("key already exists with a different type. %v has type [%v] on one side "+ - "and type [%v] on the other", k, existingV.Type.String(), v.Type.String()) + "and type [%v] on the other", k, existingV.GetType().String(), v.GetType().String()) } } @@ -178,7 +178,7 @@ func buildMultipleTypeUnion(innerType []*core.LiteralType) *core.LiteralType { unionType := x.GetCollectionType().GetUnionType() if unionType != nil { isNested = true - variants = append(variants, unionType.Variants...) + variants = append(variants, unionType.GetVariants()...) 
} else { variants = append(variants, x) } @@ -276,13 +276,13 @@ func LiteralTypeForLiteral(l *core.Literal) *core.LiteralType { case *core.Literal_Collection: return &core.LiteralType{ Type: &core.LiteralType_CollectionType{ - CollectionType: literalTypeForLiterals(l.GetCollection().Literals), + CollectionType: literalTypeForLiterals(l.GetCollection().GetLiterals()), }, } case *core.Literal_Map: return &core.LiteralType{ Type: &core.LiteralType_MapValueType{ - MapValueType: literalTypeForLiterals(maps.Values(l.GetMap().Literals)), + MapValueType: literalTypeForLiterals(maps.Values(l.GetMap().GetLiterals())), }, } case *core.Literal_OffloadedMetadata: diff --git a/flytepropeller/pkg/compiler/validators/utils_test.go b/flytepropeller/pkg/compiler/validators/utils_test.go index 09790849f3..0238c652e2 100644 --- a/flytepropeller/pkg/compiler/validators/utils_test.go +++ b/flytepropeller/pkg/compiler/validators/utils_test.go @@ -112,9 +112,9 @@ func TestLiteralTypeForLiterals(t *testing.T) { coreutils.MustMakeLiteral(2), }) - assert.Len(t, lt.GetUnionType().Variants, 2) - assert.Equal(t, core.SimpleType_INTEGER.String(), lt.GetUnionType().Variants[0].GetSimple().String()) - assert.Equal(t, core.SimpleType_STRING.String(), lt.GetUnionType().Variants[1].GetSimple().String()) + assert.Len(t, lt.GetUnionType().GetVariants(), 2) + assert.Equal(t, core.SimpleType_INTEGER.String(), lt.GetUnionType().GetVariants()[0].GetSimple().String()) + assert.Equal(t, core.SimpleType_STRING.String(), lt.GetUnionType().GetVariants()[1].GetSimple().String()) }) t.Run("non-homogenous ensure ordering", func(t *testing.T) { @@ -125,9 +125,9 @@ func TestLiteralTypeForLiterals(t *testing.T) { coreutils.MustMakeLiteral(2), }) - assert.Len(t, lt.GetUnionType().Variants, 2) - assert.Equal(t, core.SimpleType_INTEGER.String(), lt.GetUnionType().Variants[0].GetSimple().String()) - assert.Equal(t, core.SimpleType_STRING.String(), lt.GetUnionType().Variants[1].GetSimple().String()) + assert.Len(t, lt.GetUnionType().GetVariants(), 2) + assert.Equal(t, core.SimpleType_INTEGER.String(), lt.GetUnionType().GetVariants()[0].GetSimple().String()) + assert.Equal(t, core.SimpleType_STRING.String(), lt.GetUnionType().GetVariants()[1].GetSimple().String()) }) t.Run("list with mixed types", func(t *testing.T) { diff --git a/flytepropeller/pkg/compiler/validators/vars.go b/flytepropeller/pkg/compiler/validators/vars.go index e114dc4fc0..445dd258bd 100644 --- a/flytepropeller/pkg/compiler/validators/vars.go +++ b/flytepropeller/pkg/compiler/validators/vars.go @@ -48,12 +48,12 @@ func validateVarType(nodeID c.NodeID, paramName string, param *flyte.Variable, // Validate parameters have their required attributes set func validateVariables(nodeID c.NodeID, params *flyte.VariableMap, errs errors.CompileErrors) { - for paramName, param := range params.Variables { + for paramName, param := range params.GetVariables() { if len(paramName) == 0 { errs.Collect(errors.NewValueRequiredErr(nodeID, "paramName")) } - if param.Type == nil { + if param.GetType() == nil { errs.Collect(errors.NewValueRequiredErr(nodeID, "param.Type")) } } diff --git a/flytepropeller/pkg/compiler/workflow_compiler.go b/flytepropeller/pkg/compiler/workflow_compiler.go index 89e82ebd16..2cd5e9a65d 100644 --- a/flytepropeller/pkg/compiler/workflow_compiler.go +++ b/flytepropeller/pkg/compiler/workflow_compiler.go @@ -46,7 +46,7 @@ import ( // Updates workflows and tasks references to reflect the needed ones for this workflow (ignoring subworkflows) func (w *workflowBuilder) 
updateRequiredReferences() { - reqs := getRequirements(w.CoreWorkflow.Template, w.allSubWorkflows, false, errors.NewCompileErrors()) + reqs := getRequirements(w.GetCoreWorkflow().GetTemplate(), w.allSubWorkflows, false, errors.NewCompileErrors()) workflows := map[c.WorkflowIDKey]c.InterfaceProvider{} tasks := c.TaskIndex{} for _, workflowID := range reqs.launchPlanIds { @@ -167,8 +167,8 @@ func (w workflowBuilder) AddEdges(n c.NodeBuilder, edgeDirection c.EdgeDirection // Contains the main validation logic for the coreWorkflow. If successful, it'll build an executable Workflow. func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.CompileErrors) (c.Workflow, bool) { - if len(fg.Template.Nodes) == 0 { - errs.Collect(errors.NewNoNodesFoundErr(fg.Template.Id.String())) + if len(fg.GetTemplate().GetNodes()) == 0 { + errs.Collect(errors.NewNoNodesFoundErr(fg.GetTemplate().GetId().String())) return nil, !errs.HasErrors() } @@ -183,25 +183,25 @@ func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.Compile } var ok bool - if wf.CoreWorkflow.Template.Interface, ok = v.ValidateInterface(c.StartNodeID, wf.CoreWorkflow.Template.Interface, errs.NewScope()); !ok { + if wf.CoreWorkflow.Template.Interface, ok = v.ValidateInterface(c.StartNodeID, wf.GetCoreWorkflow().GetTemplate().GetInterface(), errs.NewScope()); !ok { return nil, !errs.HasErrors() } - checkpoint := make([]*core.Node, 0, len(fg.Template.Nodes)) - checkpoint = append(checkpoint, fg.Template.Nodes...) - fg.Template.Nodes = make([]*core.Node, 0, len(fg.Template.Nodes)) + checkpoint := make([]*core.Node, 0, len(fg.GetTemplate().GetNodes())) + checkpoint = append(checkpoint, fg.GetTemplate().GetNodes()...) + fg.Template.Nodes = make([]*core.Node, 0, len(fg.GetTemplate().GetNodes())) wf.GetCoreWorkflow().Connections = &core.ConnectionSet{ Downstream: make(map[string]*core.ConnectionSet_IdList), Upstream: make(map[string]*core.ConnectionSet_IdList), } globalInputNode, _ := wf.AddNode(wf.GetOrCreateNodeBuilder(startNode), errs) - globalInputNode.SetInterface(&core.TypedInterface{Outputs: wf.CoreWorkflow.Template.Interface.Inputs}) + globalInputNode.SetInterface(&core.TypedInterface{Outputs: wf.GetCoreWorkflow().GetTemplate().GetInterface().GetInputs()}) endNode := &core.Node{Id: c.EndNodeID} globalOutputNode, _ := wf.AddNode(wf.GetOrCreateNodeBuilder(endNode), errs) - globalOutputNode.SetInterface(&core.TypedInterface{Inputs: wf.CoreWorkflow.Template.Interface.Outputs}) - globalOutputNode.SetInputs(wf.CoreWorkflow.Template.Outputs) + globalOutputNode.SetInterface(&core.TypedInterface{Inputs: wf.GetCoreWorkflow().GetTemplate().GetInterface().GetOutputs()}) + globalOutputNode.SetInputs(wf.GetCoreWorkflow().GetTemplate().GetOutputs()) // Track top level nodes (a branch in a branch node is NOT a top level node). The final graph should ensure that all // top level nodes are executed before the end node. 
We do that by adding execution edges from leaf nodes that do not @@ -210,7 +210,7 @@ func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.Compile // Add and validate all other nodes for _, n := range checkpoint { - topLevelNodes.Insert(n.Id) + topLevelNodes.Insert(n.GetId()) if node, addOk := wf.AddNode(wf.GetOrCreateNodeBuilder(n), errs.NewScope()); addOk { v.ValidateNode(&wf, node, false /* validateConditionTypes */, errs.NewScope()) } @@ -225,8 +225,8 @@ func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.Compile wf.AddEdges(n, c.EdgeDirectionBidirectional, errs.NewScope()) } - if fg.Template.FailureNode != nil { - failureNode := fg.Template.FailureNode + if fg.GetTemplate().GetFailureNode() != nil { + failureNode := fg.GetTemplate().GetFailureNode() v.ValidateNode(&wf, wf.GetOrCreateNodeBuilder(failureNode), false, errs.NewScope()) wf.AddEdges(wf.GetOrCreateNodeBuilder(failureNode), c.EdgeDirectionUpstream, errs.NewScope()) } @@ -272,7 +272,7 @@ func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.Compile // Validates that all requirements for the coreWorkflow and its subworkflows are present. func (w workflowBuilder) validateAllRequirements(errs errors.CompileErrors) bool { - reqs := getRequirements(w.CoreWorkflow.Template, w.allSubWorkflows, true, errs) + reqs := getRequirements(w.GetCoreWorkflow().GetTemplate(), w.allSubWorkflows, true, errs) for _, lp := range reqs.launchPlanIds { if _, ok := w.allLaunchPlans[lp.String()]; !ok { @@ -314,17 +314,17 @@ func CompileWorkflow(primaryWf *core.WorkflowTemplate, subworkflows []*core.Work uniqueTasks := sets.NewString() taskBuilders := make([]c.Task, 0, len(tasks)) for _, task := range tasks { - if task.Template == nil || task.Template.Id == nil { + if task.GetTemplate() == nil || task.GetTemplate().GetId() == nil { errs.Collect(errors.NewValueRequiredErr("task", "Template.Id")) return nil, errs } - if uniqueTasks.Has(task.Template.Id.String()) { + if uniqueTasks.Has(task.GetTemplate().GetId().String()) { continue } - taskBuilders = append(taskBuilders, &taskBuilder{flyteTask: task.Template}) - uniqueTasks.Insert(task.Template.Id.String()) + taskBuilders = append(taskBuilders, &taskBuilder{flyteTask: task.GetTemplate()}) + uniqueTasks.Insert(task.GetTemplate().GetId().String()) } // Validate overall requirements of the coreWorkflow. diff --git a/flytepropeller/pkg/compiler/workflow_compiler_test.go b/flytepropeller/pkg/compiler/workflow_compiler_test.go index 84d55aa342..8c9cefdc25 100644 --- a/flytepropeller/pkg/compiler/workflow_compiler_test.go +++ b/flytepropeller/pkg/compiler/workflow_compiler_test.go @@ -36,7 +36,7 @@ func dumpIdentifierNames(ids []common.Identifier) []string { res := make([]string, 0, len(ids)) for _, id := range ids { - res = append(res, id.Name) + res = append(res, id.GetName()) } return res @@ -98,7 +98,7 @@ func ExampleCompileWorkflow_basic() { for _, task := range tasks { compiledTask, err := CompileTask(task) if err != nil { - fmt.Printf("failed to compile task [%v]. Error: %v", task.Id, err) + fmt.Printf("failed to compile task [%v]. 
Error: %v", task.GetId(), err) return } @@ -106,7 +106,7 @@ func ExampleCompileWorkflow_basic() { } output, errs := CompileWorkflow(inputWorkflow, subWorkflows, compiledTasks, workflows) - fmt.Printf("Compiled Workflow in GraphViz: %v\n", visualize.ToGraphViz(output.Primary)) + fmt.Printf("Compiled Workflow in GraphViz: %v\n", visualize.ToGraphViz(output.GetPrimary())) fmt.Printf("Compile Errors: %v\n", errs) // Output: @@ -195,8 +195,8 @@ func TestCompileWorkflowWithFailureNode(t *testing.T) { } output, errs := CompileWorkflow(inputWorkflow, subWorkflows, compiledTasks, workflows) - assert.Equal(t, output.Primary.Template.FailureNode.Id, "FailureNode") - assert.NotNil(t, output.Primary.Template.FailureNode.GetTaskNode()) + assert.Equal(t, output.GetPrimary().GetTemplate().GetFailureNode().GetId(), "FailureNode") + assert.NotNil(t, output.GetPrimary().GetTemplate().GetFailureNode().GetTaskNode()) assert.Nil(t, errs) } @@ -287,7 +287,7 @@ func ExampleCompileWorkflow_inputsOutputsBinding() { for _, task := range inputTasks { compiledTask, err := CompileTask(task) if err != nil { - fmt.Printf("Failed to compile task [%v]. Error: %v", task.Id, err) + fmt.Printf("Failed to compile task [%v]. Error: %v", task.GetId(), err) return } @@ -298,7 +298,7 @@ func ExampleCompileWorkflow_inputsOutputsBinding() { if errs != nil { fmt.Printf("Compile Errors: %v\n", errs) } else { - fmt.Printf("Compiled Workflow in GraphViz: %v\n", visualize.ToGraphViz(output.Primary)) + fmt.Printf("Compiled Workflow in GraphViz: %v\n", visualize.ToGraphViz(output.GetPrimary())) } // Output: @@ -575,7 +575,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) { parentT.Run("TaskNode", func(t *testing.T) { errs := errors.NewCompileErrors() - iface, ifaceOk := v.ValidateUnderlyingInterface(&g, &nodeBuilder{flyteNode: inputWorkflow.Nodes[0]}, errs) + iface, ifaceOk := v.ValidateUnderlyingInterface(&g, &nodeBuilder{flyteNode: inputWorkflow.GetNodes()[0]}, errs) assert.True(t, ifaceOk) assert.False(t, errs.HasErrors()) assert.Equal(t, taskIface, iface) @@ -587,7 +587,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) { Target: &core.Node_WorkflowNode{ WorkflowNode: &core.WorkflowNode{ Reference: &core.WorkflowNode_SubWorkflowRef{ - SubWorkflowRef: inputWorkflow.Id, + SubWorkflowRef: inputWorkflow.GetId(), }, }, }, @@ -605,7 +605,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) { BranchNode: &core.BranchNode{ IfElse: &core.IfElseBlock{ Case: &core.IfBlock{ - ThenNode: inputWorkflow.Nodes[0], + ThenNode: inputWorkflow.GetNodes()[0], }, }, }, @@ -613,7 +613,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) { }}, errs) assert.True(t, ifaceOk) assert.False(t, errs.HasErrors()) - assert.Equal(t, taskIface.Outputs, iface.Outputs) + assert.Equal(t, taskIface.GetOutputs(), iface.GetOutputs()) }) branchT.Run("TwoCases", func(t *testing.T) { @@ -623,7 +623,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) { BranchNode: &core.BranchNode{ IfElse: &core.IfElseBlock{ Case: &core.IfBlock{ - ThenNode: inputWorkflow.Nodes[0], + ThenNode: inputWorkflow.GetNodes()[0], }, Other: []*core.IfBlock{ { @@ -631,7 +631,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) { Target: &core.Node_WorkflowNode{ WorkflowNode: &core.WorkflowNode{ Reference: &core.WorkflowNode_SubWorkflowRef{ - SubWorkflowRef: inputWorkflow.Id, + SubWorkflowRef: inputWorkflow.GetId(), }, }, }, @@ -720,9 +720,9 @@ func TestCompileWorkflow(t *testing.T) { assert.NoError(t, errs) assert.NotNil(t, output) if output != nil 
{ - t.Logf("Graph Repr: %v", visualize.ToGraphViz(output.Primary)) + t.Logf("Graph Repr: %v", visualize.ToGraphViz(output.GetPrimary())) - assert.Equal(t, []string{"node_123"}, output.Primary.Connections.Upstream["node_456"].Ids) + assert.Equal(t, []string{"node_123"}, output.GetPrimary().GetConnections().GetUpstream()["node_456"].GetIds()) } } diff --git a/flytepropeller/pkg/controller/handler.go b/flytepropeller/pkg/controller/handler.go index 49c2c21549..3e9f7526fc 100644 --- a/flytepropeller/pkg/controller/handler.go +++ b/flytepropeller/pkg/controller/handler.go @@ -102,7 +102,7 @@ func (p *Propeller) TryMutateWorkflow(ctx context.Context, originalW *v1alpha1.F } ctx = contextutils.WithResourceVersion(ctx, mutableW.GetResourceVersion()) - maxRetries := uint32(p.cfg.MaxWorkflowRetries) + maxRetries := uint32(p.cfg.MaxWorkflowRetries) // #nosec G115 if IsDeleted(mutableW) || (mutableW.Status.FailedAttempts > maxRetries) { var err error func() { @@ -267,7 +267,7 @@ func (p *Propeller) parseWorkflowClosureCrdFields(ctx context.Context, dataRefer return nil, err } - wfClosureCrdFields, err := k8s.BuildWfClosureCrdFields(wfClosure.CompiledWorkflow) + wfClosureCrdFields, err := k8s.BuildWfClosureCrdFields(wfClosure.GetCompiledWorkflow()) if err != nil { logger.Errorf(ctx, "Failed to parse workflow closure data from '%s' with error '%s'", dataReference, err) return nil, err diff --git a/flytepropeller/pkg/controller/interfaces/rate_limiter.go b/flytepropeller/pkg/controller/interfaces/rate_limiter.go new file mode 100644 index 0000000000..576d9736c4 --- /dev/null +++ b/flytepropeller/pkg/controller/interfaces/rate_limiter.go @@ -0,0 +1,36 @@ +package interfaces + +import ( + "context" + "time" + + "golang.org/x/time/rate" +) + +//go:generate mockery-v2 --name Limiter --output ../mocks --case=snake --with-expecter +//go:generate mockery-v2 --name Reservation --output ../mocks --case=snake --with-expecter + +type Limiter interface { + Allow() bool + AllowN(t time.Time, n int) bool + Burst() int + Limit() rate.Limit + Reserve() Reservation + ReserveN(t time.Time, n int) Reservation + SetBurst(newBurst int) + SetBurstAt(t time.Time, newBurst int) + SetLimit(newLimit rate.Limit) + SetLimitAt(t time.Time, newLimit rate.Limit) + Tokens() float64 + TokensAt(t time.Time) float64 + Wait(ctx context.Context) (err error) + WaitN(ctx context.Context, n int) (err error) +} + +type Reservation interface { + Cancel() + CancelAt(t time.Time) + Delay() time.Duration + DelayFrom(t time.Time) time.Duration + OK() bool +} diff --git a/flytepropeller/pkg/controller/mocks/limiter.go b/flytepropeller/pkg/controller/mocks/limiter.go new file mode 100644 index 0000000000..709cdd4d65 --- /dev/null +++ b/flytepropeller/pkg/controller/mocks/limiter.go @@ -0,0 +1,637 @@ +// Code generated by mockery v2.40.3. DO NOT EDIT. 
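The new flytepropeller/pkg/controller/interfaces/rate_limiter.go above mirrors golang.org/x/time/rate behind Limiter and Reservation interfaces so the mockery-generated Limiter below can stand in for the real limiter in tests. A minimal adapter sketch, assuming production code wraps *rate.Limiter (the adapter itself is hypothetical and shown only to illustrate how the interface lines up):

package interfaces

import (
	"time"

	"golang.org/x/time/rate"
)

// rateLimiterAdapter wraps *rate.Limiter so it satisfies Limiter. Only Reserve and
// ReserveN need overriding, because their concrete return type (*rate.Reservation)
// differs from the Reservation interface; all other methods are promoted as-is.
type rateLimiterAdapter struct {
	*rate.Limiter
}

func (a rateLimiterAdapter) Reserve() Reservation {
	return a.Limiter.Reserve()
}

func (a rateLimiterAdapter) ReserveN(t time.Time, n int) Reservation {
	return a.Limiter.ReserveN(t, n)
}

// Compile-time check that the adapter satisfies the interface.
var _ Limiter = rateLimiterAdapter{}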
+ +package mocks + +import ( + context "context" + + interfaces "github.com/flyteorg/flyte/flytepropeller/pkg/controller/interfaces" + mock "github.com/stretchr/testify/mock" + + rate "golang.org/x/time/rate" + + time "time" +) + +// Limiter is an autogenerated mock type for the Limiter type +type Limiter struct { + mock.Mock +} + +type Limiter_Expecter struct { + mock *mock.Mock +} + +func (_m *Limiter) EXPECT() *Limiter_Expecter { + return &Limiter_Expecter{mock: &_m.Mock} +} + +// Allow provides a mock function with given fields: +func (_m *Limiter) Allow() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Allow") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Limiter_Allow_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Allow' +type Limiter_Allow_Call struct { + *mock.Call +} + +// Allow is a helper method to define mock.On call +func (_e *Limiter_Expecter) Allow() *Limiter_Allow_Call { + return &Limiter_Allow_Call{Call: _e.mock.On("Allow")} +} + +func (_c *Limiter_Allow_Call) Run(run func()) *Limiter_Allow_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Limiter_Allow_Call) Return(_a0 bool) *Limiter_Allow_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Limiter_Allow_Call) RunAndReturn(run func() bool) *Limiter_Allow_Call { + _c.Call.Return(run) + return _c +} + +// AllowN provides a mock function with given fields: t, n +func (_m *Limiter) AllowN(t time.Time, n int) bool { + ret := _m.Called(t, n) + + if len(ret) == 0 { + panic("no return value specified for AllowN") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(time.Time, int) bool); ok { + r0 = rf(t, n) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Limiter_AllowN_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllowN' +type Limiter_AllowN_Call struct { + *mock.Call +} + +// AllowN is a helper method to define mock.On call +// - t time.Time +// - n int +func (_e *Limiter_Expecter) AllowN(t interface{}, n interface{}) *Limiter_AllowN_Call { + return &Limiter_AllowN_Call{Call: _e.mock.On("AllowN", t, n)} +} + +func (_c *Limiter_AllowN_Call) Run(run func(t time.Time, n int)) *Limiter_AllowN_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Time), args[1].(int)) + }) + return _c +} + +func (_c *Limiter_AllowN_Call) Return(_a0 bool) *Limiter_AllowN_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Limiter_AllowN_Call) RunAndReturn(run func(time.Time, int) bool) *Limiter_AllowN_Call { + _c.Call.Return(run) + return _c +} + +// Burst provides a mock function with given fields: +func (_m *Limiter) Burst() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Burst") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// Limiter_Burst_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Burst' +type Limiter_Burst_Call struct { + *mock.Call +} + +// Burst is a helper method to define mock.On call +func (_e *Limiter_Expecter) Burst() *Limiter_Burst_Call { + return &Limiter_Burst_Call{Call: _e.mock.On("Burst")} +} + +func (_c *Limiter_Burst_Call) Run(run func()) *Limiter_Burst_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c 
*Limiter_Burst_Call) Return(_a0 int) *Limiter_Burst_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Limiter_Burst_Call) RunAndReturn(run func() int) *Limiter_Burst_Call { + _c.Call.Return(run) + return _c +} + +// Limit provides a mock function with given fields: +func (_m *Limiter) Limit() rate.Limit { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Limit") + } + + var r0 rate.Limit + if rf, ok := ret.Get(0).(func() rate.Limit); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(rate.Limit) + } + + return r0 +} + +// Limiter_Limit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Limit' +type Limiter_Limit_Call struct { + *mock.Call +} + +// Limit is a helper method to define mock.On call +func (_e *Limiter_Expecter) Limit() *Limiter_Limit_Call { + return &Limiter_Limit_Call{Call: _e.mock.On("Limit")} +} + +func (_c *Limiter_Limit_Call) Run(run func()) *Limiter_Limit_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Limiter_Limit_Call) Return(_a0 rate.Limit) *Limiter_Limit_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Limiter_Limit_Call) RunAndReturn(run func() rate.Limit) *Limiter_Limit_Call { + _c.Call.Return(run) + return _c +} + +// Reserve provides a mock function with given fields: +func (_m *Limiter) Reserve() interfaces.Reservation { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Reserve") + } + + var r0 interfaces.Reservation + if rf, ok := ret.Get(0).(func() interfaces.Reservation); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interfaces.Reservation) + } + } + + return r0 +} + +// Limiter_Reserve_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reserve' +type Limiter_Reserve_Call struct { + *mock.Call +} + +// Reserve is a helper method to define mock.On call +func (_e *Limiter_Expecter) Reserve() *Limiter_Reserve_Call { + return &Limiter_Reserve_Call{Call: _e.mock.On("Reserve")} +} + +func (_c *Limiter_Reserve_Call) Run(run func()) *Limiter_Reserve_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Limiter_Reserve_Call) Return(_a0 interfaces.Reservation) *Limiter_Reserve_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Limiter_Reserve_Call) RunAndReturn(run func() interfaces.Reservation) *Limiter_Reserve_Call { + _c.Call.Return(run) + return _c +} + +// ReserveN provides a mock function with given fields: t, n +func (_m *Limiter) ReserveN(t time.Time, n int) interfaces.Reservation { + ret := _m.Called(t, n) + + if len(ret) == 0 { + panic("no return value specified for ReserveN") + } + + var r0 interfaces.Reservation + if rf, ok := ret.Get(0).(func(time.Time, int) interfaces.Reservation); ok { + r0 = rf(t, n) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interfaces.Reservation) + } + } + + return r0 +} + +// Limiter_ReserveN_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReserveN' +type Limiter_ReserveN_Call struct { + *mock.Call +} + +// ReserveN is a helper method to define mock.On call +// - t time.Time +// - n int +func (_e *Limiter_Expecter) ReserveN(t interface{}, n interface{}) *Limiter_ReserveN_Call { + return &Limiter_ReserveN_Call{Call: _e.mock.On("ReserveN", t, n)} +} + +func (_c *Limiter_ReserveN_Call) Run(run func(t time.Time, n int)) *Limiter_ReserveN_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Time), 
args[1].(int)) + }) + return _c +} + +func (_c *Limiter_ReserveN_Call) Return(_a0 interfaces.Reservation) *Limiter_ReserveN_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Limiter_ReserveN_Call) RunAndReturn(run func(time.Time, int) interfaces.Reservation) *Limiter_ReserveN_Call { + _c.Call.Return(run) + return _c +} + +// SetBurst provides a mock function with given fields: newBurst +func (_m *Limiter) SetBurst(newBurst int) { + _m.Called(newBurst) +} + +// Limiter_SetBurst_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetBurst' +type Limiter_SetBurst_Call struct { + *mock.Call +} + +// SetBurst is a helper method to define mock.On call +// - newBurst int +func (_e *Limiter_Expecter) SetBurst(newBurst interface{}) *Limiter_SetBurst_Call { + return &Limiter_SetBurst_Call{Call: _e.mock.On("SetBurst", newBurst)} +} + +func (_c *Limiter_SetBurst_Call) Run(run func(newBurst int)) *Limiter_SetBurst_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int)) + }) + return _c +} + +func (_c *Limiter_SetBurst_Call) Return() *Limiter_SetBurst_Call { + _c.Call.Return() + return _c +} + +func (_c *Limiter_SetBurst_Call) RunAndReturn(run func(int)) *Limiter_SetBurst_Call { + _c.Call.Return(run) + return _c +} + +// SetBurstAt provides a mock function with given fields: t, newBurst +func (_m *Limiter) SetBurstAt(t time.Time, newBurst int) { + _m.Called(t, newBurst) +} + +// Limiter_SetBurstAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetBurstAt' +type Limiter_SetBurstAt_Call struct { + *mock.Call +} + +// SetBurstAt is a helper method to define mock.On call +// - t time.Time +// - newBurst int +func (_e *Limiter_Expecter) SetBurstAt(t interface{}, newBurst interface{}) *Limiter_SetBurstAt_Call { + return &Limiter_SetBurstAt_Call{Call: _e.mock.On("SetBurstAt", t, newBurst)} +} + +func (_c *Limiter_SetBurstAt_Call) Run(run func(t time.Time, newBurst int)) *Limiter_SetBurstAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Time), args[1].(int)) + }) + return _c +} + +func (_c *Limiter_SetBurstAt_Call) Return() *Limiter_SetBurstAt_Call { + _c.Call.Return() + return _c +} + +func (_c *Limiter_SetBurstAt_Call) RunAndReturn(run func(time.Time, int)) *Limiter_SetBurstAt_Call { + _c.Call.Return(run) + return _c +} + +// SetLimit provides a mock function with given fields: newLimit +func (_m *Limiter) SetLimit(newLimit rate.Limit) { + _m.Called(newLimit) +} + +// Limiter_SetLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetLimit' +type Limiter_SetLimit_Call struct { + *mock.Call +} + +// SetLimit is a helper method to define mock.On call +// - newLimit rate.Limit +func (_e *Limiter_Expecter) SetLimit(newLimit interface{}) *Limiter_SetLimit_Call { + return &Limiter_SetLimit_Call{Call: _e.mock.On("SetLimit", newLimit)} +} + +func (_c *Limiter_SetLimit_Call) Run(run func(newLimit rate.Limit)) *Limiter_SetLimit_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(rate.Limit)) + }) + return _c +} + +func (_c *Limiter_SetLimit_Call) Return() *Limiter_SetLimit_Call { + _c.Call.Return() + return _c +} + +func (_c *Limiter_SetLimit_Call) RunAndReturn(run func(rate.Limit)) *Limiter_SetLimit_Call { + _c.Call.Return(run) + return _c +} + +// SetLimitAt provides a mock function with given fields: t, newLimit +func (_m *Limiter) SetLimitAt(t time.Time, newLimit rate.Limit) { + _m.Called(t, newLimit) +} + +// Limiter_SetLimitAt_Call 
is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetLimitAt' +type Limiter_SetLimitAt_Call struct { + *mock.Call +} + +// SetLimitAt is a helper method to define mock.On call +// - t time.Time +// - newLimit rate.Limit +func (_e *Limiter_Expecter) SetLimitAt(t interface{}, newLimit interface{}) *Limiter_SetLimitAt_Call { + return &Limiter_SetLimitAt_Call{Call: _e.mock.On("SetLimitAt", t, newLimit)} +} + +func (_c *Limiter_SetLimitAt_Call) Run(run func(t time.Time, newLimit rate.Limit)) *Limiter_SetLimitAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Time), args[1].(rate.Limit)) + }) + return _c +} + +func (_c *Limiter_SetLimitAt_Call) Return() *Limiter_SetLimitAt_Call { + _c.Call.Return() + return _c +} + +func (_c *Limiter_SetLimitAt_Call) RunAndReturn(run func(time.Time, rate.Limit)) *Limiter_SetLimitAt_Call { + _c.Call.Return(run) + return _c +} + +// Tokens provides a mock function with given fields: +func (_m *Limiter) Tokens() float64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Tokens") + } + + var r0 float64 + if rf, ok := ret.Get(0).(func() float64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(float64) + } + + return r0 +} + +// Limiter_Tokens_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Tokens' +type Limiter_Tokens_Call struct { + *mock.Call +} + +// Tokens is a helper method to define mock.On call +func (_e *Limiter_Expecter) Tokens() *Limiter_Tokens_Call { + return &Limiter_Tokens_Call{Call: _e.mock.On("Tokens")} +} + +func (_c *Limiter_Tokens_Call) Run(run func()) *Limiter_Tokens_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Limiter_Tokens_Call) Return(_a0 float64) *Limiter_Tokens_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Limiter_Tokens_Call) RunAndReturn(run func() float64) *Limiter_Tokens_Call { + _c.Call.Return(run) + return _c +} + +// TokensAt provides a mock function with given fields: t +func (_m *Limiter) TokensAt(t time.Time) float64 { + ret := _m.Called(t) + + if len(ret) == 0 { + panic("no return value specified for TokensAt") + } + + var r0 float64 + if rf, ok := ret.Get(0).(func(time.Time) float64); ok { + r0 = rf(t) + } else { + r0 = ret.Get(0).(float64) + } + + return r0 +} + +// Limiter_TokensAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TokensAt' +type Limiter_TokensAt_Call struct { + *mock.Call +} + +// TokensAt is a helper method to define mock.On call +// - t time.Time +func (_e *Limiter_Expecter) TokensAt(t interface{}) *Limiter_TokensAt_Call { + return &Limiter_TokensAt_Call{Call: _e.mock.On("TokensAt", t)} +} + +func (_c *Limiter_TokensAt_Call) Run(run func(t time.Time)) *Limiter_TokensAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Time)) + }) + return _c +} + +func (_c *Limiter_TokensAt_Call) Return(_a0 float64) *Limiter_TokensAt_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Limiter_TokensAt_Call) RunAndReturn(run func(time.Time) float64) *Limiter_TokensAt_Call { + _c.Call.Return(run) + return _c +} + +// Wait provides a mock function with given fields: ctx +func (_m *Limiter) Wait(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Wait") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// 
Limiter_Wait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Wait' +type Limiter_Wait_Call struct { + *mock.Call +} + +// Wait is a helper method to define mock.On call +// - ctx context.Context +func (_e *Limiter_Expecter) Wait(ctx interface{}) *Limiter_Wait_Call { + return &Limiter_Wait_Call{Call: _e.mock.On("Wait", ctx)} +} + +func (_c *Limiter_Wait_Call) Run(run func(ctx context.Context)) *Limiter_Wait_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Limiter_Wait_Call) Return(err error) *Limiter_Wait_Call { + _c.Call.Return(err) + return _c +} + +func (_c *Limiter_Wait_Call) RunAndReturn(run func(context.Context) error) *Limiter_Wait_Call { + _c.Call.Return(run) + return _c +} + +// WaitN provides a mock function with given fields: ctx, n +func (_m *Limiter) WaitN(ctx context.Context, n int) error { + ret := _m.Called(ctx, n) + + if len(ret) == 0 { + panic("no return value specified for WaitN") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, n) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Limiter_WaitN_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitN' +type Limiter_WaitN_Call struct { + *mock.Call +} + +// WaitN is a helper method to define mock.On call +// - ctx context.Context +// - n int +func (_e *Limiter_Expecter) WaitN(ctx interface{}, n interface{}) *Limiter_WaitN_Call { + return &Limiter_WaitN_Call{Call: _e.mock.On("WaitN", ctx, n)} +} + +func (_c *Limiter_WaitN_Call) Run(run func(ctx context.Context, n int)) *Limiter_WaitN_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int)) + }) + return _c +} + +func (_c *Limiter_WaitN_Call) Return(err error) *Limiter_WaitN_Call { + _c.Call.Return(err) + return _c +} + +func (_c *Limiter_WaitN_Call) RunAndReturn(run func(context.Context, int) error) *Limiter_WaitN_Call { + _c.Call.Return(run) + return _c +} + +// NewLimiter creates a new instance of Limiter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLimiter(t interface { + mock.TestingT + Cleanup(func()) +}) *Limiter { + mock := &Limiter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/flytepropeller/pkg/controller/mocks/reservation.go b/flytepropeller/pkg/controller/mocks/reservation.go new file mode 100644 index 0000000000..d609c0b034 --- /dev/null +++ b/flytepropeller/pkg/controller/mocks/reservation.go @@ -0,0 +1,237 @@ +// Code generated by mockery v2.40.3. DO NOT EDIT. 
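The generated mocks use mockery's expecter API, so expectations are type-checked at compile time. A minimal usage sketch (illustrative only, not part of this change; the test name is hypothetical):

package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/flyteorg/flyte/flytepropeller/pkg/controller/mocks"
)

func TestLimiterMockSketch(t *testing.T) {
	// NewLimiter registers a cleanup hook that asserts all expectations were met.
	limiter := mocks.NewLimiter(t)
	limiter.EXPECT().Allow().Return(false)
	limiter.EXPECT().Burst().Return(10)

	assert.False(t, limiter.Allow())
	assert.Equal(t, 10, limiter.Burst())
}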
+ +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// Reservation is an autogenerated mock type for the Reservation type +type Reservation struct { + mock.Mock +} + +type Reservation_Expecter struct { + mock *mock.Mock +} + +func (_m *Reservation) EXPECT() *Reservation_Expecter { + return &Reservation_Expecter{mock: &_m.Mock} +} + +// Cancel provides a mock function with given fields: +func (_m *Reservation) Cancel() { + _m.Called() +} + +// Reservation_Cancel_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cancel' +type Reservation_Cancel_Call struct { + *mock.Call +} + +// Cancel is a helper method to define mock.On call +func (_e *Reservation_Expecter) Cancel() *Reservation_Cancel_Call { + return &Reservation_Cancel_Call{Call: _e.mock.On("Cancel")} +} + +func (_c *Reservation_Cancel_Call) Run(run func()) *Reservation_Cancel_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Reservation_Cancel_Call) Return() *Reservation_Cancel_Call { + _c.Call.Return() + return _c +} + +func (_c *Reservation_Cancel_Call) RunAndReturn(run func()) *Reservation_Cancel_Call { + _c.Call.Return(run) + return _c +} + +// CancelAt provides a mock function with given fields: t +func (_m *Reservation) CancelAt(t time.Time) { + _m.Called(t) +} + +// Reservation_CancelAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CancelAt' +type Reservation_CancelAt_Call struct { + *mock.Call +} + +// CancelAt is a helper method to define mock.On call +// - t time.Time +func (_e *Reservation_Expecter) CancelAt(t interface{}) *Reservation_CancelAt_Call { + return &Reservation_CancelAt_Call{Call: _e.mock.On("CancelAt", t)} +} + +func (_c *Reservation_CancelAt_Call) Run(run func(t time.Time)) *Reservation_CancelAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Time)) + }) + return _c +} + +func (_c *Reservation_CancelAt_Call) Return() *Reservation_CancelAt_Call { + _c.Call.Return() + return _c +} + +func (_c *Reservation_CancelAt_Call) RunAndReturn(run func(time.Time)) *Reservation_CancelAt_Call { + _c.Call.Return(run) + return _c +} + +// Delay provides a mock function with given fields: +func (_m *Reservation) Delay() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Delay") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// Reservation_Delay_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delay' +type Reservation_Delay_Call struct { + *mock.Call +} + +// Delay is a helper method to define mock.On call +func (_e *Reservation_Expecter) Delay() *Reservation_Delay_Call { + return &Reservation_Delay_Call{Call: _e.mock.On("Delay")} +} + +func (_c *Reservation_Delay_Call) Run(run func()) *Reservation_Delay_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Reservation_Delay_Call) Return(_a0 time.Duration) *Reservation_Delay_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Reservation_Delay_Call) RunAndReturn(run func() time.Duration) *Reservation_Delay_Call { + _c.Call.Return(run) + return _c +} + +// DelayFrom provides a mock function with given fields: t +func (_m *Reservation) DelayFrom(t time.Time) time.Duration { + ret := _m.Called(t) + + if len(ret) == 0 { + panic("no return value 
specified for DelayFrom") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func(time.Time) time.Duration); ok { + r0 = rf(t) + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// Reservation_DelayFrom_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DelayFrom' +type Reservation_DelayFrom_Call struct { + *mock.Call +} + +// DelayFrom is a helper method to define mock.On call +// - t time.Time +func (_e *Reservation_Expecter) DelayFrom(t interface{}) *Reservation_DelayFrom_Call { + return &Reservation_DelayFrom_Call{Call: _e.mock.On("DelayFrom", t)} +} + +func (_c *Reservation_DelayFrom_Call) Run(run func(t time.Time)) *Reservation_DelayFrom_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Time)) + }) + return _c +} + +func (_c *Reservation_DelayFrom_Call) Return(_a0 time.Duration) *Reservation_DelayFrom_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Reservation_DelayFrom_Call) RunAndReturn(run func(time.Time) time.Duration) *Reservation_DelayFrom_Call { + _c.Call.Return(run) + return _c +} + +// OK provides a mock function with given fields: +func (_m *Reservation) OK() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OK") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Reservation_OK_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OK' +type Reservation_OK_Call struct { + *mock.Call +} + +// OK is a helper method to define mock.On call +func (_e *Reservation_Expecter) OK() *Reservation_OK_Call { + return &Reservation_OK_Call{Call: _e.mock.On("OK")} +} + +func (_c *Reservation_OK_Call) Run(run func()) *Reservation_OK_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Reservation_OK_Call) Return(_a0 bool) *Reservation_OK_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Reservation_OK_Call) RunAndReturn(run func() bool) *Reservation_OK_Call { + _c.Call.Return(run) + return _c +} + +// NewReservation creates a new instance of Reservation. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewReservation(t interface { + mock.TestingT + Cleanup(func()) +}) *Reservation { + mock := &Reservation{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/flytepropeller/pkg/controller/nodes/array/event_recorder.go b/flytepropeller/pkg/controller/nodes/array/event_recorder.go index 999b383f39..8d14bd1d43 100644 --- a/flytepropeller/pkg/controller/nodes/array/event_recorder.go +++ b/flytepropeller/pkg/controller/nodes/array/event_recorder.go @@ -93,10 +93,10 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter // process events cacheStatus := idlcore.CatalogCacheStatus_CACHE_DISABLED for _, nodeExecutionEvent := range e.nodeEvents { - switch target := nodeExecutionEvent.TargetMetadata.(type) { + switch target := nodeExecutionEvent.GetTargetMetadata().(type) { case *event.NodeExecutionEvent_TaskNodeMetadata: if target.TaskNodeMetadata != nil { - cacheStatus = target.TaskNodeMetadata.CacheStatus + cacheStatus = target.TaskNodeMetadata.GetCacheStatus() } } } @@ -106,7 +106,7 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter if cacheStatus == idlcore.CatalogCacheStatus_CACHE_HIT && len(e.taskEvents) == 0 { e.externalResources = append(e.externalResources, &event.ExternalResourceInfo{ ExternalId: externalResourceID, - Index: uint32(index), + Index: uint32(index), // #nosec G115 RetryAttempt: retryAttempt, Phase: idlcore.TaskExecution_SUCCEEDED, CacheStatus: cacheStatus, @@ -122,7 +122,7 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter } for _, taskExecutionEvent := range e.taskEvents { - if mapLogPlugin != nil && len(taskExecutionEvent.Logs) > 0 { + if mapLogPlugin != nil && len(taskExecutionEvent.GetLogs()) > 0 { // override log links for subNode execution with map plugin logs, err := getPluginLogs(mapLogPlugin, nCtx, index, retryAttempt) if err != nil { @@ -132,16 +132,16 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter } } - for _, log := range taskExecutionEvent.Logs { - log.Name = fmt.Sprintf("%s-%d", log.Name, index) + for _, log := range taskExecutionEvent.GetLogs() { + log.Name = fmt.Sprintf("%s-%d", log.GetName(), index) } e.externalResources = append(e.externalResources, &event.ExternalResourceInfo{ ExternalId: externalResourceID, - Index: uint32(index), - Logs: taskExecutionEvent.Logs, + Index: uint32(index), // #nosec G115 + Logs: taskExecutionEvent.GetLogs(), RetryAttempt: retryAttempt, - Phase: taskExecutionEvent.Phase, + Phase: taskExecutionEvent.GetPhase(), CacheStatus: cacheStatus, }) } @@ -175,7 +175,7 @@ func (e *externalResourcesEventRecorder) finalize(ctx context.Context, nCtx inte nodeExecutionID := *nCtx.NodeExecutionMetadata().GetNodeExecutionID() if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 { - currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nodeExecutionID.NodeId) + currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nodeExecutionID.GetNodeId()) if err != nil { return err } @@ -315,7 +315,7 @@ func getPluginLogs(logPlugin tasklog.Plugin, nCtx interfaces.NodeExecutionContex extraLogTemplateVars := []tasklog.TemplateVar{ { Regex: mapplugin.LogTemplateRegexes.ExecutionIndex, - Value: strconv.FormatUint(uint64(index), 10), + Value: strconv.FormatUint(uint64(index), 10), // #nosec G115 }, { Regex: mapplugin.LogTemplateRegexes.RetryAttempt, @@ -374,12 +374,12 @@ func 
sendEvents(ctx context.Context, nCtx interfaces.NodeExecutionContext, index taskExecutionEvent := &event.TaskExecutionEvent{ TaskId: &idlcore.Identifier{ ResourceType: idlcore.ResourceType_TASK, - Project: workflowExecutionID.Project, - Domain: workflowExecutionID.Domain, + Project: workflowExecutionID.GetProject(), + Domain: workflowExecutionID.GetDomain(), Name: fmt.Sprintf("%s-%d", buildSubNodeID(nCtx, index), retryAttempt), Version: "v1", // this value is irrelevant but necessary for the identifier to be valid }, - ParentNodeExecutionId: nodeExecutionEvent.Id, + ParentNodeExecutionId: nodeExecutionEvent.GetId(), Phase: taskPhase, TaskType: "k8s-array", OccurredAt: timestamp, diff --git a/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go b/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go index 64fbff7666..5e418d3fc8 100644 --- a/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go +++ b/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go @@ -101,6 +101,6 @@ func TestGetPluginLogs(t *testing.T) { assert.Nil(t, err) assert.Equal(t, len(logConfig.Templates), len(logs)) - assert.Equal(t, "bar", logs[0].Name) - assert.Equal(t, "/console/projects/node_project/domains/node_domain/executions/node_name/nodeId/foo/taskId/task_name/attempt/0/mappedIndex/1/mappedAttempt/0/view/logs?duration=all", logs[0].Uri) + assert.Equal(t, "bar", logs[0].GetName()) + assert.Equal(t, "/console/projects/node_project/domains/node_domain/executions/node_name/nodeId/foo/taskId/task_name/attempt/0/mappedIndex/1/mappedAttempt/0/view/logs?duration=all", logs[0].GetUri()) } diff --git a/flytepropeller/pkg/controller/nodes/array/handler.go b/flytepropeller/pkg/controller/nodes/array/handler.go index 7495c77e16..51d3105a0a 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler.go +++ b/flytepropeller/pkg/controller/nodes/array/handler.go @@ -5,6 +5,9 @@ import ( "fmt" "math" "strconv" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" idlcore "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" @@ -28,6 +31,11 @@ import ( "github.com/flyteorg/flyte/flytestdlib/storage" ) +const ( + // value is 3 days of seconds which is covered by 18 bits (262144) + MAX_DELTA_TIMESTAMP = 259200 +) + var ( nilLiteral = &idlcore.Literal{ Value: &idlcore.Literal_Scalar{ @@ -76,7 +84,7 @@ func (a *arrayNodeHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecut switch arrayNodeState.Phase { case v1alpha1.ArrayNodePhaseExecuting, v1alpha1.ArrayNodePhaseFailing: for i, nodePhaseUint64 := range arrayNodeState.SubNodePhases.GetItems() { - nodePhase := v1alpha1.NodePhase(nodePhaseUint64) + nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115 // do not process nodes that have not started or are in a terminal state if nodePhase == v1alpha1.NodePhaseNotYetStarted || isTerminalNodePhase(nodePhase) { @@ -96,7 +104,7 @@ func (a *arrayNodeHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecut messageCollector.Collect(i, err.Error()) } else { // record events transitioning subNodes to aborted - retryAttempt := uint32(arrayNodeState.SubNodeRetryAttempts.GetItem(i)) + retryAttempt := uint32(arrayNodeState.SubNodeRetryAttempts.GetItem(i)) // #nosec G115 if err := sendEvents(ctx, nCtx, i, retryAttempt, idlcore.NodeExecution_ABORTED, idlcore.TaskExecution_ABORTED, eventRecorder, a.eventConfig); err != nil { logger.Warnf(ctx, "failed to record ArrayNode events: %v", err) @@ -110,7 +118,7 
@@ func (a *arrayNodeHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecut } if messageCollector.Length() > 0 { - return fmt.Errorf(messageCollector.Summary(events.MaxErrorMessageLength)) + return fmt.Errorf(messageCollector.Summary(events.MaxErrorMessageLength)) //nolint:govet,staticcheck } // update state for subNodes @@ -136,7 +144,7 @@ func (a *arrayNodeHandler) Finalize(ctx context.Context, nCtx interfaces.NodeExe switch arrayNodeState.Phase { case v1alpha1.ArrayNodePhaseExecuting, v1alpha1.ArrayNodePhaseFailing, v1alpha1.ArrayNodePhaseSucceeding: for i, nodePhaseUint64 := range arrayNodeState.SubNodePhases.GetItems() { - nodePhase := v1alpha1.NodePhase(nodePhaseUint64) + nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115 // do not process nodes that have not started or are in a terminal state if nodePhase == v1alpha1.NodePhaseNotYetStarted || isTerminalNodePhase(nodePhase) { @@ -159,7 +167,7 @@ func (a *arrayNodeHandler) Finalize(ctx context.Context, nCtx interfaces.NodeExe } if messageCollector.Length() > 0 { - return fmt.Errorf(messageCollector.Summary(events.MaxErrorMessageLength)) + return fmt.Errorf(messageCollector.Summary(events.MaxErrorMessageLength)) //nolint:govet,staticcheck } return nil @@ -192,7 +200,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu size := -1 - for key, variable := range literalMap.Literals { + for key, variable := range literalMap.GetLiterals() { literalType := validators.LiteralTypeForLiteral(variable) err := validators.ValidateLiteralType(literalType) if err != nil { @@ -211,9 +219,9 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu ), nil } } - switch literalType.Type.(type) { + switch literalType.GetType().(type) { case *idlcore.LiteralType_CollectionType: - collectionLength := len(variable.GetCollection().Literals) + collectionLength := len(variable.GetCollection().GetLiterals()) if size == -1 { size = collectionLength } else if size != collectionLength { @@ -254,9 +262,10 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu {arrayReference: &arrayNodeState.SubNodeTaskPhases, maxValue: len(core.Phases) - 1}, {arrayReference: &arrayNodeState.SubNodeRetryAttempts, maxValue: maxAttemptsValue}, {arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: maxSystemFailuresValue}, + {arrayReference: &arrayNodeState.SubNodeDeltaTimestamps, maxValue: MAX_DELTA_TIMESTAMP}, } { - *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) + *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) // #nosec G115 if err != nil { return handler.UnknownTransition, err } @@ -288,8 +297,8 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu break } - nodePhase := v1alpha1.NodePhase(nodePhaseUint64) - taskPhase := int(arrayNodeState.SubNodeTaskPhases.GetItem(i)) + nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115 + taskPhase := int(arrayNodeState.SubNodeTaskPhases.GetItem(i)) // #nosec G115 // do not process nodes in terminal state if isTerminalNodePhase(nodePhase) { @@ -370,16 +379,30 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu } // update subNode state - arrayNodeState.SubNodePhases.SetItem(index, uint64(subNodeStatus.GetPhase())) + arrayNodeState.SubNodePhases.SetItem(index, uint64(subNodeStatus.GetPhase())) // #nosec G115 if subNodeStatus.GetTaskNodeStatus() == nil { // resetting 
task phase because during retries we clear the GetTaskNodeStatus arrayNodeState.SubNodeTaskPhases.SetItem(index, uint64(0)) } else { - arrayNodeState.SubNodeTaskPhases.SetItem(index, uint64(subNodeStatus.GetTaskNodeStatus().GetPhase())) + arrayNodeState.SubNodeTaskPhases.SetItem(index, uint64(subNodeStatus.GetTaskNodeStatus().GetPhase())) // #nosec G115 } arrayNodeState.SubNodeRetryAttempts.SetItem(index, uint64(subNodeStatus.GetAttempts())) arrayNodeState.SubNodeSystemFailures.SetItem(index, uint64(subNodeStatus.GetSystemFailures())) + if arrayNodeState.SubNodeDeltaTimestamps.BitSet != nil { + startedAt := nCtx.NodeStatus().GetLastAttemptStartedAt() + subNodeStartedAt := subNodeStatus.GetLastAttemptStartedAt() + if subNodeStartedAt == nil { + // subNodeStartedAt == nil indicates either (1) node has not started or (2) node status has + // been reset (ex. retryable failure). in both cases we set the delta timestamp to 0 + arrayNodeState.SubNodeDeltaTimestamps.SetItem(index, 0) + } else if startedAt != nil && arrayNodeState.SubNodeDeltaTimestamps.GetItem(index) == 0 { + // otherwise if `SubNodeDeltaTimestamps` is unset, we compute the delta and set it + deltaDuration := uint64(subNodeStartedAt.Time.Sub(startedAt.Time).Seconds()) + arrayNodeState.SubNodeDeltaTimestamps.SetItem(index, deltaDuration) + } + } + // increment task phase version if subNode phase or task phase changed if subNodeStatus.GetPhase() != nodeExecutionRequest.nodePhase || subNodeStatus.GetTaskNodeStatus().GetPhase() != nodeExecutionRequest.taskPhase { incrementTaskPhaseVersion = true @@ -397,7 +420,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu failingCount := 0 runningCount := 0 for _, nodePhaseUint64 := range arrayNodeState.SubNodePhases.GetItems() { - nodePhase := v1alpha1.NodePhase(nodePhaseUint64) + nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115 switch nodePhase { case v1alpha1.NodePhaseSucceeded, v1alpha1.NodePhaseRecovered, v1alpha1.NodePhaseSkipped: successCount++ @@ -457,7 +480,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu case v1alpha1.ArrayNodePhaseSucceeding: gatherOutputsRequests := make([]*gatherOutputsRequest, 0, len(arrayNodeState.SubNodePhases.GetItems())) for i, nodePhaseUint64 := range arrayNodeState.SubNodePhases.GetItems() { - nodePhase := v1alpha1.NodePhase(nodePhaseUint64) + nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115 gatherOutputsRequest := &gatherOutputsRequest{ ctx: ctx, responseChannel: make(chan struct { @@ -479,8 +502,8 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu continue } - if task.CoreTask() != nil && task.CoreTask().Interface != nil && task.CoreTask().Interface.Outputs != nil { - for name := range task.CoreTask().Interface.Outputs.Variables { + if task.CoreTask() != nil && task.CoreTask().GetInterface() != nil && task.CoreTask().GetInterface().GetOutputs() != nil { + for name := range task.CoreTask().GetInterface().GetOutputs().GetVariables() { outputLiterals[name] = nilLiteral } } @@ -491,7 +514,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu }{outputLiterals, nil} } else { // initialize subNode reader - currentAttempt := int(arrayNodeState.SubNodeRetryAttempts.GetItem(i)) + currentAttempt := int(arrayNodeState.SubNodeRetryAttempts.GetItem(i)) // #nosec G115 subDataDir, subOutputDir, err := constructOutputReferences(ctx, nCtx, strconv.Itoa(i), strconv.Itoa(currentAttempt)) if err != nil { @@ -527,7 
+550,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu } if outputs := taskNode.CoreTask().GetInterface().GetOutputs(); outputs != nil { - for name := range outputs.Variables { + for name := range outputs.GetVariables() { outputLiteral := &idlcore.Literal{ Value: &idlcore.Literal_Collection{ Collection: &idlcore.LiteralCollection{ @@ -724,8 +747,8 @@ func New(nodeExecutor interfaces.Node, eventConfig *config.EventConfig, literalO func (a *arrayNodeHandler) buildArrayNodeContext(ctx context.Context, nCtx interfaces.NodeExecutionContext, arrayNodeState *handler.ArrayNodeState, arrayNode v1alpha1.ExecutableArrayNode, subNodeIndex int, eventRecorder arrayEventRecorder) ( interfaces.Node, executors.ExecutionContext, executors.DAGStructure, executors.NodeLookup, *v1alpha1.NodeSpec, *v1alpha1.NodeStatus, error) { - nodePhase := v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(subNodeIndex)) - taskPhase := int(arrayNodeState.SubNodeTaskPhases.GetItem(subNodeIndex)) + nodePhase := v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(subNodeIndex)) // #nosec G115 + taskPhase := int(arrayNodeState.SubNodeTaskPhases.GetItem(subNodeIndex)) // #nosec G115 // need to initialize the inputReader every time to ensure TaskHandler can access for cache lookups / population inputs, err := nCtx.InputReader().Get(ctx) @@ -761,22 +784,31 @@ func (a *arrayNodeHandler) buildArrayNodeContext(ctx context.Context, nCtx inter } // construct output references - currentAttempt := uint32(arrayNodeState.SubNodeRetryAttempts.GetItem(subNodeIndex)) + currentAttempt := uint32(arrayNodeState.SubNodeRetryAttempts.GetItem(subNodeIndex)) // #nosec G115 subDataDir, subOutputDir, err := constructOutputReferences(ctx, nCtx, strconv.Itoa(subNodeIndex), strconv.Itoa(int(currentAttempt))) if err != nil { return nil, nil, nil, nil, nil, nil, err } + // compute start time for subNode using delta timestamp from ArrayNode NodeStatus + var startedAt *metav1.Time + if nCtx.NodeStatus().GetLastAttemptStartedAt() != nil && arrayNodeState.SubNodeDeltaTimestamps.BitSet != nil { + if deltaSeconds := arrayNodeState.SubNodeDeltaTimestamps.GetItem(subNodeIndex); deltaSeconds != 0 { + startedAt = &metav1.Time{Time: nCtx.NodeStatus().GetLastAttemptStartedAt().Add(time.Duration(deltaSeconds) * time.Second)} // #nosec G115 + } + } + subNodeStatus := &v1alpha1.NodeStatus{ Phase: nodePhase, DataDir: subDataDir, OutputDir: subOutputDir, Attempts: currentAttempt, - SystemFailures: uint32(arrayNodeState.SubNodeSystemFailures.GetItem(subNodeIndex)), + SystemFailures: uint32(arrayNodeState.SubNodeSystemFailures.GetItem(subNodeIndex)), // #nosec G115 TaskNodeStatus: &v1alpha1.TaskNodeStatus{ Phase: taskPhase, PluginState: pluginStateBytes, }, + LastAttemptStartedAt: startedAt, } // initialize mocks diff --git a/flytepropeller/pkg/controller/nodes/array/handler_test.go b/flytepropeller/pkg/controller/nodes/array/handler_test.go index 08eea22e09..ac0e4b45ad 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/array/handler_test.go @@ -4,9 +4,11 @@ import ( "context" "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" idlcore "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" @@ -184,9 +186,15 @@ func createNodeExecutionContext(dataStore *storage.DataStore, eventRecorder inte nCtx.OnNodeStateWriter().Return(nodeStateWriter) // 
NodeStatus + nowMinus := time.Now().Add(time.Duration(-5) * time.Second) + metav1NowMinus := metav1.Time{ + Time: nowMinus, + } nCtx.OnNodeStatus().Return(&v1alpha1.NodeStatus{ - DataDir: storage.DataReference("s3://bucket/data"), - OutputDir: storage.DataReference("s3://bucket/output"), + DataDir: storage.DataReference("s3://bucket/data"), + OutputDir: storage.DataReference("s3://bucket/output"), + LastAttemptStartedAt: &metav1NowMinus, + StartedAt: &metav1NowMinus, }) return nCtx @@ -252,17 +260,18 @@ func TestAbort(t *testing.T) { {arrayReference: &arrayNodeState.SubNodeTaskPhases, maxValue: len(core.Phases) - 1}, {arrayReference: &arrayNodeState.SubNodeRetryAttempts, maxValue: 1}, {arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: 1}, + {arrayReference: &arrayNodeState.SubNodeDeltaTimestamps, maxValue: 1024}, } { - *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) + *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) // #nosec G115 assert.NoError(t, err) } for i, nodePhase := range test.subNodePhases { - arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) + arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115 } for i, taskPhase := range test.subNodeTaskPhases { - arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase)) + arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase)) // #nosec G115 } // create NodeExecutionContext @@ -277,10 +286,10 @@ func TestAbort(t *testing.T) { if len(test.expectedExternalResourcePhases) > 0 { assert.Equal(t, 1, len(eventRecorder.taskExecutionEvents)) - externalResources := eventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources() + externalResources := eventRecorder.taskExecutionEvents[0].GetMetadata().GetExternalResources() assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources)) for i, expectedPhase := range test.expectedExternalResourcePhases { - assert.Equal(t, expectedPhase, externalResources[i].Phase) + assert.Equal(t, expectedPhase, externalResources[i].GetPhase()) } } else { assert.Equal(t, 0, len(eventRecorder.taskExecutionEvents)) @@ -348,17 +357,17 @@ func TestFinalize(t *testing.T) { {arrayReference: &arrayNodeState.SubNodeTaskPhases, maxValue: len(core.Phases) - 1}, {arrayReference: &arrayNodeState.SubNodeRetryAttempts, maxValue: 1}, {arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: 1}, + {arrayReference: &arrayNodeState.SubNodeDeltaTimestamps, maxValue: 1024}, } { - - *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) + *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) // #nosec G115 assert.NoError(t, err) } for i, nodePhase := range test.subNodePhases { - arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) + arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115 } for i, taskPhase := range test.subNodeTaskPhases { - arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase)) + arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase)) // #nosec G115 } // create NodeExecutionContext @@ -447,10 +456,10 @@ func TestHandleArrayNodePhaseNone(t *testing.T) { if len(test.expectedExternalResourcePhases) > 0 { assert.Equal(t, 1, len(eventRecorder.taskExecutionEvents)) - externalResources := eventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources() + externalResources := 
eventRecorder.taskExecutionEvents[0].GetMetadata().GetExternalResources() assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources)) for i, expectedPhase := range test.expectedExternalResourcePhases { - assert.Equal(t, expectedPhase, externalResources[i].Phase) + assert.Equal(t, expectedPhase, externalResources[i].GetPhase()) } } else { assert.Equal(t, 0, len(eventRecorder.taskExecutionEvents)) @@ -475,7 +484,7 @@ func (f *fakeEventRecorder) RecordNodeEvent(ctx context.Context, event *event.No func (f *fakeEventRecorder) RecordTaskEvent(ctx context.Context, event *event.TaskExecutionEvent, eventConfig *config.EventConfig) error { f.recordTaskEventCallCount++ - if f.phaseVersionFailures == 0 || event.PhaseVersion < f.phaseVersionFailures { + if f.phaseVersionFailures == 0 || event.GetPhaseVersion() < f.phaseVersionFailures { return f.taskErr } return nil @@ -507,25 +516,27 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { } tests := []struct { - name string - parallelism *uint32 - minSuccessRatio *float32 - subNodePhases []v1alpha1.NodePhase - subNodeTaskPhases []core.Phase - subNodeTransitions []handler.Transition - expectedArrayNodePhase v1alpha1.ArrayNodePhase - expectedArrayNodeSubPhases []v1alpha1.NodePhase - expectedTransitionPhase handler.EPhase - expectedExternalResourcePhases []idlcore.TaskExecution_Phase - currentWfParallelism uint32 - maxWfParallelism uint32 - incrementParallelismCount uint32 - useFakeEventRecorder bool - eventRecorderFailures uint32 - eventRecorderError error - expectedTaskPhaseVersion uint32 - expectHandleError bool - expectedEventingCalls int + name string + parallelism *uint32 + minSuccessRatio *float32 + subNodePhases []v1alpha1.NodePhase + subNodeTaskPhases []core.Phase + subNodeDeltaTimestamps []uint64 + subNodeTransitions []handler.Transition + expectedArrayNodePhase v1alpha1.ArrayNodePhase + expectedArrayNodeSubPhases []v1alpha1.NodePhase + expectedDiffArrayNodeSubDeltaTimestamps []bool + expectedTransitionPhase handler.EPhase + expectedExternalResourcePhases []idlcore.TaskExecution_Phase + currentWfParallelism uint32 + maxWfParallelism uint32 + incrementParallelismCount uint32 + useFakeEventRecorder bool + eventRecorderFailures uint32 + eventRecorderError error + expectedTaskPhaseVersion uint32 + expectHandleError bool + expectedEventingCalls int }{ { name: "StartAllSubNodes", @@ -828,6 +839,31 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { expectHandleError: true, expectedEventingCalls: 1, }, + { + name: "DeltaTimestampUpdates", + parallelism: uint32Ptr(0), + subNodePhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseRunning, + }, + subNodeTaskPhases: []core.Phase{ + core.PhaseUndefined, + core.PhaseUndefined, + }, + subNodeTransitions: []handler.Transition{ + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRetryableFailure(idlcore.ExecutionError_SYSTEM, "", "", &handler.ExecutionInfo{})), + }, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRetryableFailure, + }, + expectedTaskPhaseVersion: 1, + expectedTransitionPhase: handler.EPhaseRunning, + expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_FAILED}, + incrementParallelismCount: 1, + }, } for _, test := range tests { @@ -859,14 
+895,18 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { {arrayReference: &arrayNodeState.SubNodeTaskPhases, maxValue: len(core.Phases) - 1}, {arrayReference: &arrayNodeState.SubNodeRetryAttempts, maxValue: 1}, {arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: 1}, + {arrayReference: &arrayNodeState.SubNodeDeltaTimestamps, maxValue: 1024}, } { - - *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) + *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) // #nosec G115 assert.NoError(t, err) } for i, nodePhase := range test.subNodePhases { - arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) + arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115 + } + + for i, deltaTimestmap := range test.subNodeDeltaTimestamps { + arrayNodeState.SubNodeDeltaTimestamps.SetItem(i, deltaTimestmap) // #nosec G115 } nodeSpec := arrayNodeSpec @@ -921,7 +961,15 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { assert.Equal(t, test.expectedTaskPhaseVersion, arrayNodeState.TaskPhaseVersion) for i, expectedPhase := range test.expectedArrayNodeSubPhases { - assert.Equal(t, expectedPhase, v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(i))) + assert.Equal(t, expectedPhase, v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(i))) // #nosec G115 + } + + for i, expectedDiffDeltaTimestamps := range test.expectedDiffArrayNodeSubDeltaTimestamps { + if expectedDiffDeltaTimestamps { + assert.NotEqual(t, arrayNodeState.SubNodeDeltaTimestamps.GetItem(i), test.subNodeDeltaTimestamps[i]) + } else { + assert.Equal(t, arrayNodeState.SubNodeDeltaTimestamps.GetItem(i), test.subNodeDeltaTimestamps[i]) + } } bufferedEventRecorder, ok := eventRecorder.(*bufferedEventRecorder) @@ -929,10 +977,10 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { if len(test.expectedExternalResourcePhases) > 0 { assert.Equal(t, 1, len(bufferedEventRecorder.taskExecutionEvents)) - externalResources := bufferedEventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources() + externalResources := bufferedEventRecorder.taskExecutionEvents[0].GetMetadata().GetExternalResources() assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources)) for i, expectedPhase := range test.expectedExternalResourcePhases { - assert.Equal(t, expectedPhase, externalResources[i].Phase) + assert.Equal(t, expectedPhase, externalResources[i].GetPhase()) } } else { assert.Equal(t, 0, len(bufferedEventRecorder.taskExecutionEvents)) @@ -1000,8 +1048,8 @@ func TestHandle_InvalidLiteralType(t *testing.T) { // Validate results assert.Equal(t, test.expectedTransitionType, transition.Type()) assert.Equal(t, test.expectedPhase, transition.Info().GetPhase()) - assert.Equal(t, test.expectedErrorCode, transition.Info().GetErr().Code) - assert.Contains(t, transition.Info().GetErr().Message, test.expectedContainedErrorMsg) + assert.Equal(t, test.expectedErrorCode, transition.Info().GetErr().GetCode()) + assert.Contains(t, transition.Info().GetErr().GetMessage(), test.expectedContainedErrorMsg) }) } } @@ -1175,7 +1223,7 @@ func TestHandleArrayNodePhaseSucceeding(t *testing.T) { subNodePhases, err := bitarray.NewCompactArray(uint(len(test.subNodePhases)), bitarray.Item(v1alpha1.NodePhaseRecovered)) assert.NoError(t, err) for i, nodePhase := range test.subNodePhases { - subNodePhases.SetItem(i, bitarray.Item(nodePhase)) + subNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115 } 
retryAttempts, err := bitarray.NewCompactArray(uint(len(test.subNodePhases)), bitarray.Item(1)) @@ -1303,14 +1351,14 @@ func TestHandleArrayNodePhaseFailing(t *testing.T) { {arrayReference: &arrayNodeState.SubNodeTaskPhases, maxValue: len(core.Phases) - 1}, {arrayReference: &arrayNodeState.SubNodeRetryAttempts, maxValue: 1}, {arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: 1}, + {arrayReference: &arrayNodeState.SubNodeDeltaTimestamps, maxValue: 1024}, } { - - *item.arrayReference, err = bitarray.NewCompactArray(uint(len(test.subNodePhases)), bitarray.Item(item.maxValue)) + *item.arrayReference, err = bitarray.NewCompactArray(uint(len(test.subNodePhases)), bitarray.Item(item.maxValue)) // #nosec G115 assert.NoError(t, err) } for i, nodePhase := range test.subNodePhases { - arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) + arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115 } // create NodeExecutionContext diff --git a/flytepropeller/pkg/controller/nodes/array/node_execution_context.go b/flytepropeller/pkg/controller/nodes/array/node_execution_context.go index b2c9619695..d55de708c0 100644 --- a/flytepropeller/pkg/controller/nodes/array/node_execution_context.go +++ b/flytepropeller/pkg/controller/nodes/array/node_execution_context.go @@ -29,12 +29,12 @@ func newStaticInputReader(inputPaths io.InputFilePaths, input *core.LiteralMap) func constructLiteralMap(inputs *core.LiteralMap, index int) (*core.LiteralMap, error) { literals := make(map[string]*core.Literal) - for name, literal := range inputs.Literals { + for name, literal := range inputs.GetLiterals() { if literalCollection := literal.GetCollection(); literalCollection != nil { - if index >= len(literalCollection.Literals) { + if index >= len(literalCollection.GetLiterals()) { return nil, fmt.Errorf("index %v out of bounds for literal collection %v", index, name) } - literals[name] = literalCollection.Literals[index] + literals[name] = literalCollection.GetLiterals()[index] } else { literals[name] = literal } @@ -57,12 +57,12 @@ func (a *arrayTaskReader) Read(ctx context.Context) (*core.TaskTemplate, error) // convert output list variable to singular outputVariables := make(map[string]*core.Variable) - for key, value := range originalTaskTemplate.Interface.Outputs.Variables { - switch v := value.Type.Type.(type) { + for key, value := range originalTaskTemplate.GetInterface().GetOutputs().GetVariables() { + switch v := value.GetType().GetType().(type) { case *core.LiteralType_CollectionType: outputVariables[key] = &core.Variable{ Type: v.CollectionType, - Description: value.Description, + Description: value.GetDescription(), } default: outputVariables[key] = value @@ -71,7 +71,7 @@ func (a *arrayTaskReader) Read(ctx context.Context) (*core.TaskTemplate, error) taskTemplate := *originalTaskTemplate taskTemplate.Interface = &core.TypedInterface{ - Inputs: originalTaskTemplate.Interface.Inputs, + Inputs: originalTaskTemplate.GetInterface().GetInputs(), Outputs: &core.VariableMap{ Variables: outputVariables, }, diff --git a/flytepropeller/pkg/controller/nodes/array/utils_test.go b/flytepropeller/pkg/controller/nodes/array/utils_test.go index 2b2c030cd6..eeddd827ea 100644 --- a/flytepropeller/pkg/controller/nodes/array/utils_test.go +++ b/flytepropeller/pkg/controller/nodes/array/utils_test.go @@ -29,10 +29,10 @@ func TestAppendLiteral(t *testing.T) { } for _, v := range outputLiterals { - collection, ok := v.Value.(*idlcore.Literal_Collection) + collection, ok := 
v.GetValue().(*idlcore.Literal_Collection) assert.True(t, ok) - assert.Equal(t, 2, len(collection.Collection.Literals)) + assert.Equal(t, 2, len(collection.Collection.GetLiterals())) } } diff --git a/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go b/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go index e8e28ac08f..f617025ed9 100644 --- a/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go +++ b/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go @@ -520,7 +520,7 @@ func TestResolveAttrPathInBinary(t *testing.T) { collection := make([]any, len(literals)) for i, l := range literals { var v any - _ = msgpack.Unmarshal(l.GetScalar().GetBinary().Value, &v) + _ = msgpack.Unmarshal(l.GetScalar().GetBinary().GetValue(), &v) collection[i] = v } return collection @@ -1434,10 +1434,10 @@ func TestResolveAttrPathInBinary(t *testing.T) { // Helper function to unmarshal a Binary Literal into an any unmarshalBinaryLiteral := func(literal *core.Literal) (any, error) { - if scalar, ok := literal.Value.(*core.Literal_Scalar); ok { - if binary, ok := scalar.Scalar.Value.(*core.Scalar_Binary); ok { + if scalar, ok := literal.GetValue().(*core.Literal_Scalar); ok { + if binary, ok := scalar.Scalar.GetValue().(*core.Scalar_Binary); ok { var value any - err := msgpack.Unmarshal(binary.Binary.Value, &value) + err := msgpack.Unmarshal(binary.Binary.GetValue(), &value) return value, err } } diff --git a/flytepropeller/pkg/controller/nodes/branch/comparator.go b/flytepropeller/pkg/controller/nodes/branch/comparator.go index cfac3be0af..b52456ea51 100644 --- a/flytepropeller/pkg/controller/nodes/branch/comparator.go +++ b/flytepropeller/pkg/controller/nodes/branch/comparator.go @@ -72,14 +72,14 @@ var perTypeComparators = map[string]comparators{ } func Evaluate(lValue *core.Primitive, rValue *core.Primitive, op core.ComparisonExpression_Operator) (bool, error) { - lValueType := reflect.TypeOf(lValue.Value) - rValueType := reflect.TypeOf(rValue.Value) + lValueType := reflect.TypeOf(lValue.GetValue()) + rValueType := reflect.TypeOf(rValue.GetValue()) if lValueType != rValueType { return false, errors.Errorf(ErrorCodeMalformedBranch, "Comparison between different primitives types. 
lVal[%v]:rVal[%v]", lValueType, rValueType) } comps, ok := perTypeComparators[lValueType.String()] if !ok { - return false, errors.Errorf("Comparator not defined for type: [%v]", lValueType.String()) + return false, errors.Errorf("Comparator not defined for type: [%v]", lValueType.String()) //nolint:govet,staticcheck } isBoolean := false if lValueType.String() == primitiveBooleanType { diff --git a/flytepropeller/pkg/controller/nodes/branch/evaluator.go b/flytepropeller/pkg/controller/nodes/branch/evaluator.go index 4bc1676745..c6d717cfe8 100644 --- a/flytepropeller/pkg/controller/nodes/branch/evaluator.go +++ b/flytepropeller/pkg/controller/nodes/branch/evaluator.go @@ -27,7 +27,7 @@ func EvaluateComparison(expr *core.ComparisonExpression, nodeInputs *core.Litera if nodeInputs == nil { return false, errors.Errorf(ErrorCodeMalformedBranch, "Failed to find Value for Variable [%v]", expr.GetLeftValue().GetVar()) } - lValue = nodeInputs.Literals[expr.GetLeftValue().GetVar()] + lValue = nodeInputs.GetLiterals()[expr.GetLeftValue().GetVar()] if lValue == nil { return false, errors.Errorf(ErrorCodeMalformedBranch, "Failed to find Value for Variable [%v]", expr.GetLeftValue().GetVar()) } @@ -39,7 +39,7 @@ func EvaluateComparison(expr *core.ComparisonExpression, nodeInputs *core.Litera if nodeInputs == nil { return false, errors.Errorf(ErrorCodeMalformedBranch, "Failed to find Value for Variable [%v]", expr.GetLeftValue().GetVar()) } - rValue = nodeInputs.Literals[expr.GetRightValue().GetVar()] + rValue = nodeInputs.GetLiterals()[expr.GetRightValue().GetVar()] if rValue == nil { return false, errors.Errorf(ErrorCodeMalformedBranch, "Failed to find Value for Variable [%v]", expr.GetRightValue().GetVar()) } @@ -136,7 +136,7 @@ func DecideBranch(ctx context.Context, nl executors.NodeLookup, nodeID v1alpha1. 
if selectedNodeID == nil { if node.GetElseFail() != nil { - return nil, errors.Errorf(ErrorCodeUserProvidedError, node.GetElseFail().Message) + return nil, errors.Errorf(ErrorCodeUserProvidedError, node.GetElseFail().GetMessage()) //nolint:govet,staticcheck } return nil, errors.Errorf(ErrorCodeMalformedBranch, "No branch satisfied") } diff --git a/flytepropeller/pkg/controller/nodes/branch/evaluator_test.go b/flytepropeller/pkg/controller/nodes/branch/evaluator_test.go index dae8a1337b..ae29572a22 100644 --- a/flytepropeller/pkg/controller/nodes/branch/evaluator_test.go +++ b/flytepropeller/pkg/controller/nodes/branch/evaluator_test.go @@ -279,7 +279,7 @@ func TestEvaluateBooleanExpression(t *testing.T) { }, } - for k, v := range inputs.Literals { + for k, v := range inputs.GetLiterals() { outerInputs.Literals[k] = v } diff --git a/flytepropeller/pkg/controller/nodes/branch/handler.go b/flytepropeller/pkg/controller/nodes/branch/handler.go index 431f5fa3eb..9789b65c22 100644 --- a/flytepropeller/pkg/controller/nodes/branch/handler.go +++ b/flytepropeller/pkg/controller/nodes/branch/handler.go @@ -183,7 +183,7 @@ func (b *branchHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecution // We should never reach here, but for safety and completeness errMsg := "branch evaluation failed" if branch.GetElseFail() != nil { - errMsg = branch.GetElseFail().Message + errMsg = branch.GetElseFail().GetMessage() } logger.Errorf(ctx, errMsg) return nil @@ -227,7 +227,7 @@ func (b *branchHandler) Finalize(ctx context.Context, nCtx interfaces.NodeExecut // We should never reach here, but for safety and completeness errMsg := "branch evaluation failed" if branch.GetElseFail() != nil { - errMsg = branch.GetElseFail().Message + errMsg = branch.GetElseFail().GetMessage() } logger.Errorf(ctx, "failed to evaluate branch - user error: %s", errMsg) return nil diff --git a/flytepropeller/pkg/controller/nodes/cache.go b/flytepropeller/pkg/controller/nodes/cache.go index e8e7fc3720..5b1188aa56 100644 --- a/flytepropeller/pkg/controller/nodes/cache.go +++ b/flytepropeller/pkg/controller/nodes/cache.go @@ -105,12 +105,12 @@ func (n *nodeExecutor) CheckCatalogCache(ctx context.Context, nCtx interfaces.No return entry, nil } - logger.Infof(ctx, "Catalog CacheHit: for task [%s/%s/%s/%s]", catalogKey.Identifier.Project, - catalogKey.Identifier.Domain, catalogKey.Identifier.Name, catalogKey.Identifier.Version) + logger.Infof(ctx, "Catalog CacheHit: for task [%s/%s/%s/%s]", catalogKey.Identifier.GetProject(), + catalogKey.Identifier.GetDomain(), catalogKey.Identifier.GetName(), catalogKey.Identifier.GetVersion()) n.metrics.catalogHitCount.Inc(ctx) iface := catalogKey.TypedInterface - if iface.Outputs != nil && len(iface.Outputs.Variables) > 0 { + if iface.GetOutputs() != nil && len(iface.GetOutputs().GetVariables()) > 0 { // copy cached outputs to node outputs o, ee, err := entry.GetOutputs().Read(ctx) if err != nil { @@ -157,15 +157,15 @@ func (n *nodeExecutor) GetOrExtendCatalogReservation(ctx context.Context, nCtx i } var status core.CatalogReservation_Status - if reservation.OwnerId == ownerID { + if reservation.GetOwnerId() == ownerID { status = core.CatalogReservation_RESERVATION_ACQUIRED } else { status = core.CatalogReservation_RESERVATION_EXISTS } n.metrics.reservationGetSuccessCount.Inc(ctx) - return catalog.NewReservationEntry(reservation.ExpiresAt.AsTime(), - reservation.HeartbeatInterval.AsDuration(), reservation.OwnerId, status), nil + return 
catalog.NewReservationEntry(reservation.GetExpiresAt().AsTime(), + reservation.GetHeartbeatInterval().AsDuration(), reservation.GetOwnerId(), status), nil } // ReleaseCatalogReservation attempts to release an artifact reservation if the task is cacheable @@ -208,12 +208,12 @@ func (n *nodeExecutor) WriteCatalogCache(ctx context.Context, nCtx interfaces.No } iface := catalogKey.TypedInterface - if iface.Outputs != nil && len(iface.Outputs.Variables) == 0 { + if iface.GetOutputs() != nil && len(iface.GetOutputs().GetVariables()) == 0 { return catalog.NewStatus(core.CatalogCacheStatus_CACHE_DISABLED, nil), nil } - logger.Infof(ctx, "Catalog CacheEnabled. recording execution [%s/%s/%s/%s]", catalogKey.Identifier.Project, - catalogKey.Identifier.Domain, catalogKey.Identifier.Name, catalogKey.Identifier.Version) + logger.Infof(ctx, "Catalog CacheEnabled. recording execution [%s/%s/%s/%s]", catalogKey.Identifier.GetProject(), + catalogKey.Identifier.GetDomain(), catalogKey.Identifier.GetName(), catalogKey.Identifier.GetVersion()) outputPaths := ioutils.NewReadOnlyOutputFilePaths(ctx, nCtx.DataStore(), nCtx.NodeStatus().GetOutputDir()) outputReader := ioutils.NewRemoteFileOutputReader(ctx, nCtx.DataStore(), outputPaths, 0) diff --git a/flytepropeller/pkg/controller/nodes/cache_test.go b/flytepropeller/pkg/controller/nodes/cache_test.go index fa9eecadb2..f6c57b31de 100644 --- a/flytepropeller/pkg/controller/nodes/cache_test.go +++ b/flytepropeller/pkg/controller/nodes/cache_test.go @@ -128,11 +128,11 @@ func TestUpdatePhaseCacheInfo(t *testing.T) { // ensure cache and reservation status' are being set correctly if test.cacheStatus != nil { - assert.Equal(t, cacheStatus.GetCacheStatus(), phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata.CacheStatus) + assert.Equal(t, cacheStatus.GetCacheStatus(), phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata.GetCacheStatus()) } if test.reservationStatus != nil { - assert.Equal(t, reservationStatus, phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata.ReservationStatus) + assert.Equal(t, reservationStatus, phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata.GetReservationStatus()) } }) } diff --git a/flytepropeller/pkg/controller/nodes/catalog/config.go b/flytepropeller/pkg/controller/nodes/catalog/config.go index 4dd7bc70ae..cededafcae 100644 --- a/flytepropeller/pkg/controller/nodes/catalog/config.go +++ b/flytepropeller/pkg/controller/nodes/catalog/config.go @@ -82,7 +82,7 @@ func NewCatalogClient(ctx context.Context, authOpt ...grpc.DialOption) (catalog. case DataCatalogType: return datacatalog.NewDataCatalog(ctx, catalogConfig.Endpoint, catalogConfig.Insecure, catalogConfig.MaxCacheAge.Duration, catalogConfig.UseAdminAuth, catalogConfig.DefaultServiceConfig, - uint(catalogConfig.MaxRetries), catalogConfig.BackoffScalar, catalogConfig.GetBackoffJitter(ctx), authOpt...) + uint(catalogConfig.MaxRetries), catalogConfig.BackoffScalar, catalogConfig.GetBackoffJitter(ctx), authOpt...) 
// #nosec G115 case NoOpDiscoveryType, "": return NOOPCatalog{}, nil } diff --git a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog.go b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog.go index 00a99d6c54..b10f5d0291 100644 --- a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog.go +++ b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog.go @@ -54,14 +54,14 @@ func (m *CatalogClient) GetDataset(ctx context.Context, key catalog.Key) (*datac return nil, err } - return datasetResponse.Dataset, nil + return datasetResponse.GetDataset(), nil } // GetArtifactByTag retrieves an artifact using the provided tag and dataset. func (m *CatalogClient) GetArtifactByTag(ctx context.Context, tagName string, dataset *datacatalog.Dataset) (*datacatalog.Artifact, error) { logger.Debugf(ctx, "Get Artifact by tag %v", tagName) artifactQuery := &datacatalog.GetArtifactRequest{ - Dataset: dataset.Id, + Dataset: dataset.GetId(), QueryHandle: &datacatalog.GetArtifactRequest_TagName{ TagName: tagName, }, @@ -73,21 +73,21 @@ func (m *CatalogClient) GetArtifactByTag(ctx context.Context, tagName string, da // check artifact's age if the configuration specifies a max age if m.maxCacheAge > time.Duration(0) { - artifact := response.Artifact - createdAt, err := ptypes.Timestamp(artifact.CreatedAt) + artifact := response.GetArtifact() + createdAt, err := ptypes.Timestamp(artifact.GetCreatedAt()) if err != nil { - logger.Errorf(ctx, "DataCatalog Artifact has invalid createdAt %+v, err: %+v", artifact.CreatedAt, err) + logger.Errorf(ctx, "DataCatalog Artifact has invalid createdAt %+v, err: %+v", artifact.GetCreatedAt(), err) return nil, err } if time.Since(createdAt) > m.maxCacheAge { logger.Warningf(ctx, "Expired Cached Artifact %v created on %v, older than max age %v", - artifact.Id, createdAt.String(), m.maxCacheAge) + artifact.GetId(), createdAt.String(), m.maxCacheAge) return nil, status.Error(codes.NotFound, "Artifact over age limit") } } - return response.Artifact, nil + return response.GetArtifact(), nil } // Get the cached task execution from Catalog. 
@@ -103,7 +103,7 @@ func (m *CatalogClient) Get(ctx context.Context, key catalog.Key) (catalog.Entry } inputs := &core.LiteralMap{} - if key.TypedInterface.Inputs != nil { + if key.TypedInterface.GetInputs() != nil { retInputs, err := key.InputReader.Get(ctx) if err != nil { return catalog.Entry{}, errors.Wrap(err, "failed to read inputs when trying to query catalog") @@ -139,11 +139,11 @@ func (m *CatalogClient) Get(ctx context.Context, key catalog.Key) (catalog.Entry outputs, err := GenerateTaskOutputsFromArtifact(key.Identifier, key.TypedInterface, artifact) if err != nil { - logger.Errorf(ctx, "DataCatalog failed to get outputs from artifact %+v, err: %+v", artifact.Id, err) + logger.Errorf(ctx, "DataCatalog failed to get outputs from artifact %+v, err: %+v", artifact.GetId(), err) return catalog.NewCatalogEntry(ioutils.NewInMemoryOutputReader(outputs, nil, nil), catalog.NewStatus(core.CatalogCacheStatus_CACHE_MISS, md)), err } - logger.Infof(ctx, "Retrieved %v outputs from artifact %v, tag: %v", len(outputs.Literals), artifact.Id, tag) + logger.Infof(ctx, "Retrieved %v outputs from artifact %v, tag: %v", len(outputs.GetLiterals()), artifact.GetId(), tag) return catalog.NewCatalogEntry(ioutils.NewInMemoryOutputReader(outputs, nil, nil), catalog.NewStatus(core.CatalogCacheStatus_CACHE_HIT, md)), nil } @@ -178,7 +178,7 @@ func (m *CatalogClient) createDataset(ctx context.Context, key catalog.Key, meta func (m *CatalogClient) prepareInputsAndOutputs(ctx context.Context, key catalog.Key, reader io.OutputReader) (inputs *core.LiteralMap, outputs *core.LiteralMap, err error) { inputs = &core.LiteralMap{} outputs = &core.LiteralMap{} - if key.TypedInterface.Inputs != nil && len(key.TypedInterface.Inputs.Variables) != 0 { + if key.TypedInterface.GetInputs() != nil && len(key.TypedInterface.GetInputs().GetVariables()) != 0 { retInputs, err := key.InputReader.Get(ctx) if err != nil { logger.Errorf(ctx, "DataCatalog failed to read inputs err: %s", err) @@ -188,7 +188,7 @@ func (m *CatalogClient) prepareInputsAndOutputs(ctx context.Context, key catalog inputs = retInputs } - if key.TypedInterface.Outputs != nil && len(key.TypedInterface.Outputs.Variables) != 0 { + if key.TypedInterface.GetOutputs() != nil && len(key.TypedInterface.GetOutputs().GetVariables()) != 0 { retOutputs, retErr, err := reader.Read(ctx) if err != nil { logger.Errorf(ctx, "DataCatalog failed to read outputs err: %s", err) @@ -211,8 +211,8 @@ func (m *CatalogClient) createArtifact(ctx context.Context, key catalog.Key, dat logger.Debugf(ctx, "Creating artifact for key %+v, dataset %+v and execution %+v", key, datasetID, metadata) // Create the artifact for the execution that belongs in the task - artifactDataList := make([]*datacatalog.ArtifactData, 0, len(outputs.Literals)) - for name, value := range outputs.Literals { + artifactDataList := make([]*datacatalog.ArtifactData, 0, len(outputs.GetLiterals())) + for name, value := range outputs.GetLiterals() { artifactData := &datacatalog.ArtifactData{ Name: name, Value: value, @@ -230,15 +230,15 @@ func (m *CatalogClient) createArtifact(ctx context.Context, key catalog.Key, dat createArtifactRequest := &datacatalog.CreateArtifactRequest{Artifact: cachedArtifact} _, err := m.client.CreateArtifact(ctx, createArtifactRequest) if err != nil { - logger.Errorf(ctx, "Failed to create Artifact %+v, err: %v", cachedArtifact.Id, err) + logger.Errorf(ctx, "Failed to create Artifact %+v, err: %v", cachedArtifact.GetId(), err) return catalog.Status{}, err } - logger.Debugf(ctx, "Created 
artifact: %v, with %v outputs from execution %+v", cachedArtifact.Id, len(artifactDataList), metadata) + logger.Debugf(ctx, "Created artifact: %v, with %v outputs from execution %+v", cachedArtifact.GetId(), len(artifactDataList), metadata) // Tag the artifact since it is the cached artifact tagName, err := GenerateArtifactTagName(ctx, inputs, key.CacheIgnoreInputVars) if err != nil { - logger.Errorf(ctx, "Failed to generate tag for artifact %+v, err: %+v", cachedArtifact.Id, err) + logger.Errorf(ctx, "Failed to generate tag for artifact %+v, err: %+v", cachedArtifact.GetId(), err) return catalog.Status{}, err } logger.Infof(ctx, "Cached exec tag: %v, task: %v", tagName, key.Identifier) @@ -247,19 +247,19 @@ func (m *CatalogClient) createArtifact(ctx context.Context, key catalog.Key, dat tag := &datacatalog.Tag{ Name: tagName, Dataset: datasetID, - ArtifactId: cachedArtifact.Id, + ArtifactId: cachedArtifact.GetId(), } _, err = m.client.AddTag(ctx, &datacatalog.AddTagRequest{Tag: tag}) if err != nil { if status.Code(err) == codes.AlreadyExists { - logger.Warnf(ctx, "Tag %v already exists for Artifact %v (idempotent)", tagName, cachedArtifact.Id) + logger.Warnf(ctx, "Tag %v already exists for Artifact %v (idempotent)", tagName, cachedArtifact.GetId()) } else { - logger.Errorf(ctx, "Failed to add tag %+v for artifact %+v, err: %+v", tagName, cachedArtifact.Id, err) + logger.Errorf(ctx, "Failed to add tag %+v for artifact %+v, err: %+v", tagName, cachedArtifact.GetId(), err) return catalog.Status{}, err } } - logger.Debugf(ctx, "Successfully created artifact %+v for key %+v, dataset %+v and execution %+v", cachedArtifact.Id, key, datasetID, metadata) + logger.Debugf(ctx, "Successfully created artifact %+v for key %+v, dataset %+v and execution %+v", cachedArtifact.GetId(), key, datasetID, metadata) return catalog.NewStatus(core.CatalogCacheStatus_CACHE_POPULATED, EventCatalogMetadata(datasetID, tag, nil)), nil } @@ -267,8 +267,8 @@ func (m *CatalogClient) createArtifact(ctx context.Context, key catalog.Key, dat func (m *CatalogClient) updateArtifact(ctx context.Context, key catalog.Key, datasetID *datacatalog.DatasetID, inputs *core.LiteralMap, outputs *core.LiteralMap, metadata catalog.Metadata) (catalog.Status, error) { logger.Debugf(ctx, "Updating artifact for key %+v, dataset %+v and execution %+v", key, datasetID, metadata) - artifactDataList := make([]*datacatalog.ArtifactData, 0, len(outputs.Literals)) - for name, value := range outputs.Literals { + artifactDataList := make([]*datacatalog.ArtifactData, 0, len(outputs.GetLiterals())) + for name, value := range outputs.GetLiterals() { artifactData := &datacatalog.ArtifactData{ Name: name, Value: value, @@ -305,7 +305,7 @@ func (m *CatalogClient) updateArtifact(ctx context.Context, key catalog.Key, dat return catalog.Status{}, fmt.Errorf("failed to get source from metadata. Error: %w", err) } - logger.Debugf(ctx, "Successfully updated artifact with ID %v and %d outputs for key %+v, dataset %+v and execution %+v", tag.ArtifactId, len(artifactDataList), key, datasetID, metadata) + logger.Debugf(ctx, "Successfully updated artifact with ID %v and %d outputs for key %+v, dataset %+v and execution %+v", tag.GetArtifactId(), len(artifactDataList), key, datasetID, metadata) return catalog.NewStatus(core.CatalogCacheStatus_CACHE_POPULATED, EventCatalogMetadata(datasetID, tag, source)), nil } @@ -382,7 +382,7 @@ func (m *CatalogClient) GetOrExtendReservation(ctx context.Context, key catalog. 
} inputs := &core.LiteralMap{} - if key.TypedInterface.Inputs != nil { + if key.TypedInterface.GetInputs() != nil { retInputs, err := key.InputReader.Get(ctx) if err != nil { return nil, errors.Wrap(err, "failed to read inputs when trying to query catalog") @@ -409,7 +409,7 @@ func (m *CatalogClient) GetOrExtendReservation(ctx context.Context, key catalog. return nil, err } - return response.Reservation, nil + return response.GetReservation(), nil } // ReleaseReservation attempts to release a reservation for a cacheable task. If the reservation @@ -422,7 +422,7 @@ func (m *CatalogClient) ReleaseReservation(ctx context.Context, key catalog.Key, } inputs := &core.LiteralMap{} - if key.TypedInterface.Inputs != nil { + if key.TypedInterface.GetInputs() != nil { retInputs, err := key.InputReader.Get(ctx) if err != nil { return errors.Wrap(err, "failed to read inputs when trying to query catalog") diff --git a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog_test.go b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog_test.go index ce8f6f4069..2a4c1a07eb 100644 --- a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog_test.go +++ b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog_test.go @@ -108,7 +108,7 @@ func TestCatalog_Get(t *testing.T) { mockClient.On("GetDataset", ctx, mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool { - assert.EqualValues(t, datasetID.String(), o.Dataset.String()) + assert.EqualValues(t, datasetID.String(), o.GetDataset().String()) return true }), ).Return(nil, status.Error(codes.NotFound, "test not found")) @@ -136,7 +136,7 @@ func TestCatalog_Get(t *testing.T) { mockClient.On("GetDataset", ctx, mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool { - assert.EqualValues(t, datasetID.String(), o.Dataset.String()) + assert.EqualValues(t, datasetID.String(), o.GetDataset().String()) return true }), ).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil, "") @@ -167,9 +167,9 @@ func TestCatalog_Get(t *testing.T) { taskID := &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ ResourceType: core.ResourceType_TASK, - Name: sampleKey.Identifier.Name, - Project: sampleKey.Identifier.Project, - Domain: sampleKey.Identifier.Domain, + Name: sampleKey.Identifier.GetName(), + Project: sampleKey.Identifier.GetProject(), + Domain: sampleKey.Identifier.GetDomain(), Version: "ver", }, NodeExecutionId: &core.NodeExecutionIdentifier{ @@ -190,14 +190,14 @@ func TestCatalog_Get(t *testing.T) { mockClient.On("GetDataset", ctx, mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool { - assert.EqualValues(t, datasetID, o.Dataset) + assert.EqualValues(t, datasetID, o.GetDataset()) return true }), ).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil) sampleArtifact := &datacatalog.Artifact{ Id: "test-artifact", - Dataset: sampleDataSet.Id, + Dataset: sampleDataSet.GetId(), Data: []*datacatalog.ArtifactData{sampleArtifactData}, Metadata: GetArtifactMetadataForSource(taskID), Tags: []*datacatalog.Tag{ @@ -208,16 +208,16 @@ func TestCatalog_Get(t *testing.T) { }, } - assert.Equal(t, taskID.NodeExecutionId.ExecutionId.Name, sampleArtifact.GetMetadata().KeyMap[execNameKey]) - assert.Equal(t, taskID.NodeExecutionId.NodeId, sampleArtifact.GetMetadata().KeyMap[execNodeIDKey]) - assert.Equal(t, taskID.NodeExecutionId.ExecutionId.Project, sampleArtifact.GetMetadata().KeyMap[execProjectKey]) - assert.Equal(t, taskID.NodeExecutionId.ExecutionId.Domain, 
sampleArtifact.GetMetadata().KeyMap[execDomainKey]) - assert.Equal(t, strconv.Itoa(int(taskID.RetryAttempt)), sampleArtifact.GetMetadata().KeyMap[execTaskAttemptKey]) + assert.Equal(t, taskID.GetNodeExecutionId().GetExecutionId().GetName(), sampleArtifact.GetMetadata().GetKeyMap()[execNameKey]) + assert.Equal(t, taskID.GetNodeExecutionId().GetNodeId(), sampleArtifact.GetMetadata().GetKeyMap()[execNodeIDKey]) + assert.Equal(t, taskID.GetNodeExecutionId().GetExecutionId().GetProject(), sampleArtifact.GetMetadata().GetKeyMap()[execProjectKey]) + assert.Equal(t, taskID.GetNodeExecutionId().GetExecutionId().GetDomain(), sampleArtifact.GetMetadata().GetKeyMap()[execDomainKey]) + assert.Equal(t, strconv.Itoa(int(taskID.GetRetryAttempt())), sampleArtifact.GetMetadata().GetKeyMap()[execTaskAttemptKey]) mockClient.On("GetArtifact", ctx, mock.MatchedBy(func(o *datacatalog.GetArtifactRequest) bool { - assert.EqualValues(t, datasetID, o.Dataset) + assert.EqualValues(t, datasetID, o.GetDataset()) assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTagName()) return true }), @@ -228,18 +228,18 @@ func TestCatalog_Get(t *testing.T) { resp, err := catalogClient.Get(ctx, newKey) assert.NoError(t, err) assert.Equal(t, core.CatalogCacheStatus_CACHE_HIT.String(), resp.GetStatus().GetCacheStatus().String()) - assert.NotNil(t, resp.GetStatus().GetMetadata().DatasetId) - assert.Equal(t, core.ResourceType_DATASET, resp.GetStatus().GetMetadata().DatasetId.ResourceType) - assert.Equal(t, datasetID.Name, resp.GetStatus().GetMetadata().DatasetId.Name) - assert.Equal(t, datasetID.Project, resp.GetStatus().GetMetadata().DatasetId.Project) - assert.Equal(t, datasetID.Domain, resp.GetStatus().GetMetadata().DatasetId.Domain) - assert.Equal(t, datasetID.Version, resp.GetStatus().GetMetadata().DatasetId.Version) - assert.NotNil(t, resp.GetStatus().GetMetadata().ArtifactTag) - assert.NotNil(t, resp.GetStatus().GetMetadata().SourceExecution) + assert.NotNil(t, resp.GetStatus().GetMetadata().GetDatasetId()) + assert.Equal(t, core.ResourceType_DATASET, resp.GetStatus().GetMetadata().GetDatasetId().GetResourceType()) + assert.Equal(t, datasetID.GetName(), resp.GetStatus().GetMetadata().GetDatasetId().GetName()) + assert.Equal(t, datasetID.GetProject(), resp.GetStatus().GetMetadata().GetDatasetId().GetProject()) + assert.Equal(t, datasetID.GetDomain(), resp.GetStatus().GetMetadata().GetDatasetId().GetDomain()) + assert.Equal(t, datasetID.GetVersion(), resp.GetStatus().GetMetadata().GetDatasetId().GetVersion()) + assert.NotNil(t, resp.GetStatus().GetMetadata().GetArtifactTag()) + assert.NotNil(t, resp.GetStatus().GetMetadata().GetSourceExecution()) sourceTID := resp.GetStatus().GetMetadata().GetSourceTaskExecution() - assert.Equal(t, taskID.TaskId.String(), sourceTID.TaskId.String()) - assert.Equal(t, taskID.RetryAttempt, sourceTID.RetryAttempt) - assert.Equal(t, taskID.NodeExecutionId.String(), sourceTID.NodeExecutionId.String()) + assert.Equal(t, taskID.GetTaskId().String(), sourceTID.GetTaskId().String()) + assert.Equal(t, taskID.GetRetryAttempt(), sourceTID.GetRetryAttempt()) + assert.Equal(t, taskID.GetNodeExecutionId().String(), sourceTID.GetNodeExecutionId().String()) }) t.Run("Found expired artifact", func(t *testing.T) { @@ -259,7 +259,7 @@ func TestCatalog_Get(t *testing.T) { mockClient.On("GetDataset", ctx, mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool { - assert.EqualValues(t, datasetID, o.Dataset) + assert.EqualValues(t, datasetID, o.GetDataset()) return true }), 
).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil) @@ -268,14 +268,14 @@ func TestCatalog_Get(t *testing.T) { sampleArtifact := &datacatalog.Artifact{ Id: "test-artifact", - Dataset: sampleDataSet.Id, + Dataset: sampleDataSet.GetId(), Data: []*datacatalog.ArtifactData{sampleArtifactData}, CreatedAt: createdAt, } mockClient.On("GetArtifact", ctx, mock.MatchedBy(func(o *datacatalog.GetArtifactRequest) bool { - assert.EqualValues(t, datasetID, o.Dataset) + assert.EqualValues(t, datasetID, o.GetDataset()) assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTagName()) return true }), @@ -309,7 +309,7 @@ func TestCatalog_Get(t *testing.T) { mockClient.On("GetDataset", ctx, mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool { - assert.EqualValues(t, datasetID, o.Dataset) + assert.EqualValues(t, datasetID, o.GetDataset()) return true }), ).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil) @@ -318,14 +318,14 @@ func TestCatalog_Get(t *testing.T) { sampleArtifact := &datacatalog.Artifact{ Id: "test-artifact", - Dataset: sampleDataSet.Id, + Dataset: sampleDataSet.GetId(), Data: []*datacatalog.ArtifactData{sampleArtifactData}, CreatedAt: createdAt, } mockClient.On("GetArtifact", ctx, mock.MatchedBy(func(o *datacatalog.GetArtifactRequest) bool { - assert.EqualValues(t, datasetID, o.Dataset) + assert.EqualValues(t, datasetID, o.GetDataset()) assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTagName()) return true }), @@ -356,20 +356,20 @@ func TestCatalog_Get(t *testing.T) { mockClient.On("GetDataset", ctx, mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool { - assert.EqualValues(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.Dataset.Version) + assert.EqualValues(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.GetDataset().GetVersion()) return true }), ).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil) sampleArtifact := &datacatalog.Artifact{ Id: "test-artifact", - Dataset: sampleDataSet.Id, + Dataset: sampleDataSet.GetId(), Data: []*datacatalog.ArtifactData{}, } mockClient.On("GetArtifact", ctx, mock.MatchedBy(func(o *datacatalog.GetArtifactRequest) bool { - assert.EqualValues(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.Dataset.Version) + assert.EqualValues(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.GetDataset().GetVersion()) assert.Equal(t, "flyte_cached-GKw-c0PwFokMUQ6T-TUmEWnZ4_VlQ2Qpgw-vCTT0-OQ", o.GetTagName()) return true }), @@ -385,7 +385,7 @@ func TestCatalog_Get(t *testing.T) { v, e, err := resp.GetOutputs().Read(ctx) assert.NoError(t, err) assert.Nil(t, e) - assert.Len(t, v.Literals, 0) + assert.Len(t, v.GetLiterals(), 0) }) } @@ -404,7 +404,7 @@ func TestCatalog_Put(t *testing.T) { mockClient.On("CreateDataset", ctx, mock.MatchedBy(func(o *datacatalog.CreateDatasetRequest) bool { - assert.True(t, proto.Equal(o.Dataset.Id, datasetID)) + assert.True(t, proto.Equal(o.GetDataset().GetId(), datasetID)) return true }), ).Return(&datacatalog.CreateDatasetResponse{}, nil) @@ -412,11 +412,11 @@ func TestCatalog_Put(t *testing.T) { mockClient.On("CreateArtifact", ctx, mock.MatchedBy(func(o *datacatalog.CreateArtifactRequest) bool { - _, parseErr := uuid.Parse(o.Artifact.Id) + _, parseErr := uuid.Parse(o.GetArtifact().GetId()) assert.NoError(t, parseErr) - assert.EqualValues(t, 1, len(o.Artifact.Data)) - assert.EqualValues(t, "out1", o.Artifact.Data[0].Name) - assert.True(t, proto.Equal(newStringLiteral("output1-stringval"), o.Artifact.Data[0].Value)) + assert.EqualValues(t, 1, len(o.GetArtifact().GetData())) + 
assert.EqualValues(t, "out1", o.GetArtifact().GetData()[0].GetName()) + assert.True(t, proto.Equal(newStringLiteral("output1-stringval"), o.GetArtifact().GetData()[0].GetValue())) return true }), ).Return(&datacatalog.CreateArtifactResponse{}, nil) @@ -424,7 +424,7 @@ func TestCatalog_Put(t *testing.T) { mockClient.On("AddTag", ctx, mock.MatchedBy(func(o *datacatalog.AddTagRequest) bool { - assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.Tag.Name) + assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTag().GetName()) return true }), ).Return(&datacatalog.AddTagResponse{}, nil) @@ -440,7 +440,7 @@ func TestCatalog_Put(t *testing.T) { assert.NoError(t, err) assert.Equal(t, core.CatalogCacheStatus_CACHE_POPULATED, s.GetCacheStatus()) assert.NotNil(t, s.GetMetadata()) - assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", s.GetMetadata().ArtifactTag.Name) + assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", s.GetMetadata().GetArtifactTag().GetName()) }) t.Run("Create dataset fails", func(t *testing.T) { @@ -519,7 +519,7 @@ func TestCatalog_Put(t *testing.T) { mockClient.On("CreateDataset", ctx, mock.MatchedBy(func(o *datacatalog.CreateDatasetRequest) bool { - assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.Dataset.Id.Version) + assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.GetDataset().GetId().GetVersion()) return true }), ).Return(&datacatalog.CreateDatasetResponse{}, nil) @@ -527,7 +527,7 @@ func TestCatalog_Put(t *testing.T) { mockClient.On("CreateArtifact", ctx, mock.MatchedBy(func(o *datacatalog.CreateArtifactRequest) bool { - assert.EqualValues(t, 0, len(o.Artifact.Data)) + assert.EqualValues(t, 0, len(o.GetArtifact().GetData())) return true }), ).Return(&datacatalog.CreateArtifactResponse{}, nil) @@ -535,7 +535,7 @@ func TestCatalog_Put(t *testing.T) { mockClient.On("AddTag", ctx, mock.MatchedBy(func(o *datacatalog.AddTagRequest) bool { - assert.EqualValues(t, "flyte_cached-GKw-c0PwFokMUQ6T-TUmEWnZ4_VlQ2Qpgw-vCTT0-OQ", o.Tag.Name) + assert.EqualValues(t, "flyte_cached-GKw-c0PwFokMUQ6T-TUmEWnZ4_VlQ2Qpgw-vCTT0-OQ", o.GetTag().GetName()) return true }), ).Return(&datacatalog.AddTagResponse{}, nil) @@ -567,11 +567,11 @@ func TestCatalog_Put(t *testing.T) { mockClient.On("CreateArtifact", ctx, mock.MatchedBy(func(o *datacatalog.CreateArtifactRequest) bool { - _, parseErr := uuid.Parse(o.Artifact.Id) + _, parseErr := uuid.Parse(o.GetArtifact().GetId()) assert.NoError(t, parseErr) - assert.EqualValues(t, 1, len(o.Artifact.Data)) - assert.EqualValues(t, "out1", o.Artifact.Data[0].Name) - assert.True(t, proto.Equal(newStringLiteral("output1-stringval"), o.Artifact.Data[0].Value)) + assert.EqualValues(t, 1, len(o.GetArtifact().GetData())) + assert.EqualValues(t, "out1", o.GetArtifact().GetData()[0].GetName()) + assert.True(t, proto.Equal(newStringLiteral("output1-stringval"), o.GetArtifact().GetData()[0].GetValue())) createArtifactCalled = true return true }), @@ -581,7 +581,7 @@ func TestCatalog_Put(t *testing.T) { mockClient.On("AddTag", ctx, mock.MatchedBy(func(o *datacatalog.AddTagRequest) bool { - assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.Tag.Name) + assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTag().GetName()) addTagCalled = true return true }), @@ -619,7 +619,7 @@ func TestCatalog_Update(t *testing.T) { mockClient.On("CreateDataset", ctx, mock.MatchedBy(func(o *datacatalog.CreateDatasetRequest) 
bool { - assert.True(t, proto.Equal(o.Dataset.Id, datasetID)) + assert.True(t, proto.Equal(o.GetDataset().GetId(), datasetID)) return true }), ).Return(&datacatalog.CreateDatasetResponse{}, nil) @@ -627,8 +627,8 @@ func TestCatalog_Update(t *testing.T) { mockClient.On("UpdateArtifact", ctx, mock.MatchedBy(func(o *datacatalog.UpdateArtifactRequest) bool { - assert.True(t, proto.Equal(o.Dataset, datasetID)) - assert.IsType(t, &datacatalog.UpdateArtifactRequest_TagName{}, o.QueryHandle) + assert.True(t, proto.Equal(o.GetDataset(), datasetID)) + assert.IsType(t, &datacatalog.UpdateArtifactRequest_TagName{}, o.GetQueryHandle()) assert.Equal(t, tagName, o.GetTagName()) return true }), @@ -637,9 +637,9 @@ func TestCatalog_Update(t *testing.T) { taskID := &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ ResourceType: core.ResourceType_TASK, - Name: sampleKey.Identifier.Name, - Project: sampleKey.Identifier.Project, - Domain: sampleKey.Identifier.Domain, + Name: sampleKey.Identifier.GetName(), + Project: sampleKey.Identifier.GetProject(), + Domain: sampleKey.Identifier.GetDomain(), Version: "version", }, NodeExecutionId: &core.NodeExecutionIdentifier{ @@ -658,24 +658,24 @@ func TestCatalog_Update(t *testing.T) { or := ioutils.NewInMemoryOutputReader(sampleParameters, nil, nil) s, err := discovery.Update(ctx, newKey, or, catalog.Metadata{ WorkflowExecutionIdentifier: &core.WorkflowExecutionIdentifier{ - Name: taskID.NodeExecutionId.ExecutionId.Name, - Domain: taskID.NodeExecutionId.ExecutionId.Domain, - Project: taskID.NodeExecutionId.ExecutionId.Project, + Name: taskID.GetNodeExecutionId().GetExecutionId().GetName(), + Domain: taskID.GetNodeExecutionId().GetExecutionId().GetDomain(), + Project: taskID.GetNodeExecutionId().GetExecutionId().GetProject(), }, TaskExecutionIdentifier: &core.TaskExecutionIdentifier{ TaskId: &sampleKey.Identifier, - NodeExecutionId: taskID.NodeExecutionId, + NodeExecutionId: taskID.GetNodeExecutionId(), RetryAttempt: 0, }, }) assert.NoError(t, err) assert.Equal(t, core.CatalogCacheStatus_CACHE_POPULATED, s.GetCacheStatus()) assert.NotNil(t, s.GetMetadata()) - assert.Equal(t, tagName, s.GetMetadata().ArtifactTag.Name) + assert.Equal(t, tagName, s.GetMetadata().GetArtifactTag().GetName()) sourceTID := s.GetMetadata().GetSourceTaskExecution() - assert.Equal(t, taskID.TaskId.String(), sourceTID.TaskId.String()) - assert.Equal(t, taskID.RetryAttempt, sourceTID.RetryAttempt) - assert.Equal(t, taskID.NodeExecutionId.String(), sourceTID.NodeExecutionId.String()) + assert.Equal(t, taskID.GetTaskId().String(), sourceTID.GetTaskId().String()) + assert.Equal(t, taskID.GetRetryAttempt(), sourceTID.GetRetryAttempt()) + assert.Equal(t, taskID.GetNodeExecutionId().String(), sourceTID.GetNodeExecutionId().String()) }) t.Run("Overwrite non-existing execution", func(t *testing.T) { @@ -706,9 +706,9 @@ func TestCatalog_Update(t *testing.T) { taskID := &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ ResourceType: core.ResourceType_TASK, - Name: sampleKey.Identifier.Name, - Project: sampleKey.Identifier.Project, - Domain: sampleKey.Identifier.Domain, + Name: sampleKey.Identifier.GetName(), + Project: sampleKey.Identifier.GetProject(), + Domain: sampleKey.Identifier.GetDomain(), Version: "version", }, NodeExecutionId: &core.NodeExecutionIdentifier{ @@ -727,13 +727,13 @@ func TestCatalog_Update(t *testing.T) { or := ioutils.NewInMemoryOutputReader(sampleParameters, nil, nil) s, err := discovery.Update(ctx, newKey, or, catalog.Metadata{ WorkflowExecutionIdentifier: 
&core.WorkflowExecutionIdentifier{ - Name: taskID.NodeExecutionId.ExecutionId.Name, - Domain: taskID.NodeExecutionId.ExecutionId.Domain, - Project: taskID.NodeExecutionId.ExecutionId.Project, + Name: taskID.GetNodeExecutionId().GetExecutionId().GetName(), + Domain: taskID.GetNodeExecutionId().GetExecutionId().GetDomain(), + Project: taskID.GetNodeExecutionId().GetExecutionId().GetProject(), }, TaskExecutionIdentifier: &core.TaskExecutionIdentifier{ TaskId: &sampleKey.Identifier, - NodeExecutionId: taskID.NodeExecutionId, + NodeExecutionId: taskID.GetNodeExecutionId(), RetryAttempt: 0, }, }) @@ -755,7 +755,7 @@ func TestCatalog_Update(t *testing.T) { mockClient.On("CreateDataset", ctx, mock.MatchedBy(func(o *datacatalog.CreateDatasetRequest) bool { - assert.True(t, proto.Equal(o.Dataset.Id, datasetID)) + assert.True(t, proto.Equal(o.GetDataset().GetId(), datasetID)) createDatasetCalled = true return true }), @@ -770,9 +770,9 @@ func TestCatalog_Update(t *testing.T) { mockClient.On("CreateArtifact", ctx, mock.MatchedBy(func(o *datacatalog.CreateArtifactRequest) bool { - _, parseErr := uuid.Parse(o.Artifact.Id) + _, parseErr := uuid.Parse(o.GetArtifact().GetId()) assert.NoError(t, parseErr) - assert.True(t, proto.Equal(o.Artifact.Dataset, datasetID)) + assert.True(t, proto.Equal(o.GetArtifact().GetDataset(), datasetID)) createArtifactCalled = true return true }), @@ -782,7 +782,7 @@ func TestCatalog_Update(t *testing.T) { mockClient.On("AddTag", ctx, mock.MatchedBy(func(o *datacatalog.AddTagRequest) bool { - assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.Tag.Name) + assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTag().GetName()) addTagCalled = true return true }), @@ -791,9 +791,9 @@ func TestCatalog_Update(t *testing.T) { taskID := &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ ResourceType: core.ResourceType_TASK, - Name: sampleKey.Identifier.Name, - Project: sampleKey.Identifier.Project, - Domain: sampleKey.Identifier.Domain, + Name: sampleKey.Identifier.GetName(), + Project: sampleKey.Identifier.GetProject(), + Domain: sampleKey.Identifier.GetDomain(), Version: "version", }, NodeExecutionId: &core.NodeExecutionIdentifier{ @@ -812,20 +812,20 @@ func TestCatalog_Update(t *testing.T) { or := ioutils.NewInMemoryOutputReader(sampleParameters, nil, nil) s, err := discovery.Update(ctx, newKey, or, catalog.Metadata{ WorkflowExecutionIdentifier: &core.WorkflowExecutionIdentifier{ - Name: taskID.NodeExecutionId.ExecutionId.Name, - Domain: taskID.NodeExecutionId.ExecutionId.Domain, - Project: taskID.NodeExecutionId.ExecutionId.Project, + Name: taskID.GetNodeExecutionId().GetExecutionId().GetName(), + Domain: taskID.GetNodeExecutionId().GetExecutionId().GetDomain(), + Project: taskID.GetNodeExecutionId().GetExecutionId().GetProject(), }, TaskExecutionIdentifier: &core.TaskExecutionIdentifier{ TaskId: &sampleKey.Identifier, - NodeExecutionId: taskID.NodeExecutionId, + NodeExecutionId: taskID.GetNodeExecutionId(), RetryAttempt: 0, }, }) assert.NoError(t, err) assert.Equal(t, core.CatalogCacheStatus_CACHE_POPULATED, s.GetCacheStatus()) assert.NotNil(t, s.GetMetadata()) - assert.Equal(t, tagName, s.GetMetadata().ArtifactTag.Name) + assert.Equal(t, tagName, s.GetMetadata().GetArtifactTag().GetName()) assert.Nil(t, s.GetMetadata().GetSourceTaskExecution()) assert.True(t, createDatasetCalled) assert.True(t, updateArtifactCalled) @@ -932,8 +932,8 @@ func TestCatalog_GetOrExtendReservation(t *testing.T) { 
mockClient.On("GetOrExtendReservation", ctx, mock.MatchedBy(func(o *datacatalog.GetOrExtendReservationRequest) bool { - assert.EqualValues(t, datasetID.String(), o.ReservationId.DatasetId.String()) - assert.EqualValues(t, tagName, o.ReservationId.TagName) + assert.EqualValues(t, datasetID.String(), o.GetReservationId().GetDatasetId().String()) + assert.EqualValues(t, tagName, o.GetReservationId().GetTagName()) return true }), ).Return(&datacatalog.GetOrExtendReservationResponse{Reservation: ¤tReservation}, nil, "") @@ -943,7 +943,7 @@ func TestCatalog_GetOrExtendReservation(t *testing.T) { reservation, err := catalogClient.GetOrExtendReservation(ctx, newKey, currentOwner, heartbeatInterval) assert.NoError(t, err) - assert.Equal(t, reservation.OwnerId, currentOwner) + assert.Equal(t, reservation.GetOwnerId(), currentOwner) }) t.Run("ExistingReservation", func(t *testing.T) { @@ -958,8 +958,8 @@ func TestCatalog_GetOrExtendReservation(t *testing.T) { mockClient.On("GetOrExtendReservation", ctx, mock.MatchedBy(func(o *datacatalog.GetOrExtendReservationRequest) bool { - assert.EqualValues(t, datasetID.String(), o.ReservationId.DatasetId.String()) - assert.EqualValues(t, tagName, o.ReservationId.TagName) + assert.EqualValues(t, datasetID.String(), o.GetReservationId().GetDatasetId().String()) + assert.EqualValues(t, tagName, o.GetReservationId().GetTagName()) return true }), ).Return(&datacatalog.GetOrExtendReservationResponse{Reservation: &prevReservation}, nil, "") @@ -969,7 +969,7 @@ func TestCatalog_GetOrExtendReservation(t *testing.T) { reservation, err := catalogClient.GetOrExtendReservation(ctx, newKey, currentOwner, heartbeatInterval) assert.NoError(t, err) - assert.Equal(t, reservation.OwnerId, prevOwner) + assert.Equal(t, reservation.GetOwnerId(), prevOwner) }) } @@ -988,8 +988,8 @@ func TestCatalog_ReleaseReservation(t *testing.T) { mockClient.On("ReleaseReservation", ctx, mock.MatchedBy(func(o *datacatalog.ReleaseReservationRequest) bool { - assert.EqualValues(t, datasetID.String(), o.ReservationId.DatasetId.String()) - assert.EqualValues(t, tagName, o.ReservationId.TagName) + assert.EqualValues(t, datasetID.String(), o.GetReservationId().GetDatasetId().String()) + assert.EqualValues(t, tagName, o.GetReservationId().GetTagName()) return true }), ).Return(&datacatalog.ReleaseReservationResponse{}, nil, "") @@ -1013,8 +1013,8 @@ func TestCatalog_ReleaseReservation(t *testing.T) { mockClient.On("ReleaseReservation", ctx, mock.MatchedBy(func(o *datacatalog.ReleaseReservationRequest) bool { - assert.EqualValues(t, datasetID.String(), o.ReservationId.DatasetId.String()) - assert.EqualValues(t, tagName, o.ReservationId.TagName) + assert.EqualValues(t, datasetID.String(), o.GetReservationId().GetDatasetId().String()) + assert.EqualValues(t, tagName, o.GetReservationId().GetTagName()) return true }), ).Return(nil, status.Error(codes.NotFound, "reservation not found")) diff --git a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer.go b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer.go index ba94bdadec..403d1a6885 100644 --- a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer.go +++ b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer.go @@ -26,43 +26,43 @@ var emptyLiteralMap = core.LiteralMap{Literals: map[string]*core.Literal{}} var emptyVariableMap = core.VariableMap{Variables: map[string]*core.Variable{}} func getDatasetNameFromTask(taskID core.Identifier) string { - return fmt.Sprintf("%s-%s", taskNamespace, 
taskID.Name) + return fmt.Sprintf("%s-%s", taskNamespace, taskID.GetName()) } // Transform the artifact Data into task execution outputs as a literal map func GenerateTaskOutputsFromArtifact(id core.Identifier, taskInterface core.TypedInterface, artifact *datacatalog.Artifact) (*core.LiteralMap, error) { // if there are no outputs in the task, return empty map - if taskInterface.Outputs == nil || len(taskInterface.Outputs.Variables) == 0 { + if taskInterface.GetOutputs() == nil || len(taskInterface.GetOutputs().GetVariables()) == 0 { return &emptyLiteralMap, nil } - outputVariables := taskInterface.Outputs.Variables - artifactDataList := artifact.Data + outputVariables := taskInterface.GetOutputs().GetVariables() + artifactDataList := artifact.GetData() // verify the task outputs matches what is stored in ArtifactData if len(outputVariables) != len(artifactDataList) { - return nil, fmt.Errorf("the task %s with %d outputs, should have %d artifactData for artifact %s", id.String(), len(outputVariables), len(artifactDataList), artifact.Id) + return nil, fmt.Errorf("the task %s with %d outputs, should have %d artifactData for artifact %s", id.String(), len(outputVariables), len(artifactDataList), artifact.GetId()) } outputs := make(map[string]*core.Literal, len(artifactDataList)) for _, artifactData := range artifactDataList { // verify that the name and type of artifactData matches what is expected from the interface - if _, ok := outputVariables[artifactData.Name]; !ok { - return nil, fmt.Errorf("unexpected artifactData with name [%v] does not match any task output variables %v", artifactData.Name, reflect.ValueOf(outputVariables).MapKeys()) + if _, ok := outputVariables[artifactData.GetName()]; !ok { + return nil, fmt.Errorf("unexpected artifactData with name [%v] does not match any task output variables %v", artifactData.GetName(), reflect.ValueOf(outputVariables).MapKeys()) } - expectedVarType := outputVariables[artifactData.Name].GetType() - inputType := validators.LiteralTypeForLiteral(artifactData.Value) + expectedVarType := outputVariables[artifactData.GetName()].GetType() + inputType := validators.LiteralTypeForLiteral(artifactData.GetValue()) err := validators.ValidateLiteralType(inputType) if err != nil { - return nil, fmt.Errorf("failed to validate literal type for %s with err: %s", artifactData.Name, err) + return nil, fmt.Errorf("failed to validate literal type for %s with err: %s", artifactData.GetName(), err) } if !validators.AreTypesCastable(inputType, expectedVarType) { - return nil, fmt.Errorf("unexpected artifactData: [%v] type: [%v] does not match any task output type: [%v]", artifactData.Name, inputType, expectedVarType) + return nil, fmt.Errorf("unexpected artifactData: [%v] type: [%v] does not match any task output type: [%v]", artifactData.GetName(), inputType, expectedVarType) } - outputs[artifactData.Name] = artifactData.Value + outputs[artifactData.GetName()] = artifactData.GetValue() } return &core.LiteralMap{Literals: outputs}, nil @@ -86,12 +86,12 @@ func generateTaskSignatureHash(ctx context.Context, taskInterface core.TypedInte taskInputs := &emptyVariableMap taskOutputs := &emptyVariableMap - if taskInterface.Inputs != nil && len(taskInterface.Inputs.Variables) != 0 { - taskInputs = taskInterface.Inputs + if taskInterface.GetInputs() != nil && len(taskInterface.GetInputs().GetVariables()) != 0 { + taskInputs = taskInterface.GetInputs() } - if taskInterface.Outputs != nil && len(taskInterface.Outputs.Variables) != 0 { - taskOutputs = taskInterface.Outputs + if 
taskInterface.GetOutputs() != nil && len(taskInterface.GetOutputs().GetVariables()) != 0 { + taskOutputs = taskInterface.GetOutputs() } inputHash, err := pbhash.ComputeHash(ctx, taskInputs) @@ -138,8 +138,8 @@ func GenerateDatasetIDForTask(ctx context.Context, k catalog.Key) (*datacatalog. } datasetID := &datacatalog.DatasetID{ - Project: k.Identifier.Project, - Domain: k.Identifier.Domain, + Project: k.Identifier.GetProject(), + Domain: k.Identifier.GetDomain(), Name: getDatasetNameFromTask(k.Identifier), Version: datasetVersion, } @@ -150,7 +150,7 @@ func DatasetIDToIdentifier(id *datacatalog.DatasetID) *core.Identifier { if id == nil { return nil } - return &core.Identifier{ResourceType: core.ResourceType_DATASET, Name: id.Name, Project: id.Project, Domain: id.Domain, Version: id.Version} + return &core.Identifier{ResourceType: core.ResourceType_DATASET, Name: id.GetName(), Project: id.GetProject(), Domain: id.GetDomain(), Version: id.GetVersion()} } // With Node-Node relationship this is bound to change. So lets keep it extensible @@ -175,7 +175,7 @@ func GetDatasetMetadataForSource(taskExecutionID *core.TaskExecutionIdentifier) } return &datacatalog.Metadata{ KeyMap: map[string]string{ - taskVersionKey: taskExecutionID.TaskId.Version, + taskVersionKey: taskExecutionID.GetTaskId().GetVersion(), }, } } @@ -186,10 +186,10 @@ func GetArtifactMetadataForSource(taskExecutionID *core.TaskExecutionIdentifier) } return &datacatalog.Metadata{ KeyMap: map[string]string{ - execProjectKey: taskExecutionID.NodeExecutionId.GetExecutionId().GetProject(), - execDomainKey: taskExecutionID.NodeExecutionId.GetExecutionId().GetDomain(), - execNameKey: taskExecutionID.NodeExecutionId.GetExecutionId().GetName(), - execNodeIDKey: taskExecutionID.NodeExecutionId.GetNodeId(), + execProjectKey: taskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject(), + execDomainKey: taskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain(), + execNameKey: taskExecutionID.GetNodeExecutionId().GetExecutionId().GetName(), + execNodeIDKey: taskExecutionID.GetNodeExecutionId().GetNodeId(), execTaskAttemptKey: strconv.Itoa(int(taskExecutionID.GetRetryAttempt())), }, } @@ -207,7 +207,7 @@ func GetSourceFromMetadata(datasetMd, artifactMd *datacatalog.Metadata, currentI } // Jul-06-2020 DataCatalog stores only wfExecutionKey & taskVersionKey So we will default the project / domain to the current dataset's project domain - val := GetOrDefault(artifactMd.KeyMap, execTaskAttemptKey, "0") + val := GetOrDefault(artifactMd.GetKeyMap(), execTaskAttemptKey, "0") attempt, err := strconv.ParseUint(val, 10, 32) if err != nil { return nil, fmt.Errorf("failed to parse [%v] to integer. 
Error: %w", val, err) @@ -215,19 +215,19 @@ func GetSourceFromMetadata(datasetMd, artifactMd *datacatalog.Metadata, currentI return &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ - ResourceType: currentID.ResourceType, - Project: currentID.Project, - Domain: currentID.Domain, - Name: currentID.Name, - Version: GetOrDefault(datasetMd.KeyMap, taskVersionKey, "unknown"), + ResourceType: currentID.GetResourceType(), + Project: currentID.GetProject(), + Domain: currentID.GetDomain(), + Name: currentID.GetName(), + Version: GetOrDefault(datasetMd.GetKeyMap(), taskVersionKey, "unknown"), }, RetryAttempt: uint32(attempt), NodeExecutionId: &core.NodeExecutionIdentifier{ - NodeId: GetOrDefault(artifactMd.KeyMap, execNodeIDKey, "unknown"), + NodeId: GetOrDefault(artifactMd.GetKeyMap(), execNodeIDKey, "unknown"), ExecutionId: &core.WorkflowExecutionIdentifier{ - Project: GetOrDefault(artifactMd.KeyMap, execProjectKey, currentID.GetProject()), - Domain: GetOrDefault(artifactMd.KeyMap, execDomainKey, currentID.GetDomain()), - Name: GetOrDefault(artifactMd.KeyMap, execNameKey, "unknown"), + Project: GetOrDefault(artifactMd.GetKeyMap(), execProjectKey, currentID.GetProject()), + Domain: GetOrDefault(artifactMd.GetKeyMap(), execDomainKey, currentID.GetDomain()), + Name: GetOrDefault(artifactMd.GetKeyMap(), execNameKey, "unknown"), }, }, }, nil @@ -241,8 +241,8 @@ func EventCatalogMetadata(datasetID *datacatalog.DatasetID, tag *datacatalog.Tag if tag != nil { md.ArtifactTag = &core.CatalogArtifactTag{ - ArtifactId: tag.ArtifactId, - Name: tag.Name, + ArtifactId: tag.GetArtifactId(), + Name: tag.GetName(), } } diff --git a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer_test.go b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer_test.go index 92e4c82926..6fd6455e02 100644 --- a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer_test.go +++ b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer_test.go @@ -32,8 +32,8 @@ func TestNilParamTask(t *testing.T) { } datasetID, err := GenerateDatasetIDForTask(context.TODO(), key) assert.NoError(t, err) - assert.NotEmpty(t, datasetID.Version) - assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", datasetID.Version) + assert.NotEmpty(t, datasetID.GetVersion()) + assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", datasetID.GetVersion()) } // Ensure that empty parameters generate the same dataset as nil parameters @@ -53,8 +53,8 @@ func TestEmptyParamTask(t *testing.T) { } datasetID, err := GenerateDatasetIDForTask(context.TODO(), key) assert.NoError(t, err) - assert.NotEmpty(t, datasetID.Version) - assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", datasetID.Version) + assert.NotEmpty(t, datasetID.GetVersion()) + assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", datasetID.GetVersion()) key.TypedInterface.Inputs = nil key.TypedInterface.Outputs = nil @@ -84,8 +84,8 @@ func TestVariableMapOrder(t *testing.T) { } datasetID, err := GenerateDatasetIDForTask(context.TODO(), key) assert.NoError(t, err) - assert.NotEmpty(t, datasetID.Version) - assert.Equal(t, "1.0.0-UxVtPm0k-GKw-c0Pw", datasetID.Version) + assert.NotEmpty(t, datasetID.GetVersion()) + assert.Equal(t, "1.0.0-UxVtPm0k-GKw-c0Pw", datasetID.GetVersion()) key.TypedInterface.Inputs = &core.VariableMap{ Variables: map[string]*core.Variable{ @@ -96,7 +96,7 @@ func TestVariableMapOrder(t *testing.T) { datasetIDDupe, err := GenerateDatasetIDForTask(context.TODO(), key) assert.NoError(t, err) - assert.Equal(t, "1.0.0-UxVtPm0k-GKw-c0Pw", datasetIDDupe.Version) + 
assert.Equal(t, "1.0.0-UxVtPm0k-GKw-c0Pw", datasetIDDupe.GetVersion()) assert.Equal(t, datasetID.String(), datasetIDDupe.String()) } @@ -173,17 +173,17 @@ func TestGetArtifactMetadataForSource(t *testing.T) { }{ {"nil TaskExec", args{}, nil}, {"TaskExec", args{tID}, map[string]string{ - execTaskAttemptKey: strconv.Itoa(int(tID.RetryAttempt)), - execProjectKey: tID.NodeExecutionId.ExecutionId.Project, - execDomainKey: tID.NodeExecutionId.ExecutionId.Domain, - execNodeIDKey: tID.NodeExecutionId.NodeId, - execNameKey: tID.NodeExecutionId.ExecutionId.Name, + execTaskAttemptKey: strconv.Itoa(int(tID.GetRetryAttempt())), + execProjectKey: tID.GetNodeExecutionId().GetExecutionId().GetProject(), + execDomainKey: tID.GetNodeExecutionId().GetExecutionId().GetDomain(), + execNodeIDKey: tID.GetNodeExecutionId().GetNodeId(), + execNameKey: tID.GetNodeExecutionId().GetExecutionId().GetName(), }}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := GetArtifactMetadataForSource(tt.args.taskExecutionID); !reflect.DeepEqual(got.KeyMap, tt.want) { - t.Errorf("GetMetadataForSource() = %v, want %v", got.KeyMap, tt.want) + if got := GetArtifactMetadataForSource(tt.args.taskExecutionID); !reflect.DeepEqual(got.GetKeyMap(), tt.want) { + t.Errorf("GetMetadataForSource() = %v, want %v", got.GetKeyMap(), tt.want) } }) } @@ -247,13 +247,13 @@ func TestGetSourceFromMetadata(t *testing.T) { RetryAttempt: 0, }}, // In legacy only taskVersionKey is available - {"legacy", args{datasetMd: GetDatasetMetadataForSource(&tID).KeyMap, currentID: currentTaskID}, &core.TaskExecutionIdentifier{ + {"legacy", args{datasetMd: GetDatasetMetadataForSource(&tID).GetKeyMap(), currentID: currentTaskID}, &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ ResourceType: core.ResourceType_TASK, Name: "x", Project: "project", Domain: "development", - Version: tID.TaskId.Version, + Version: tID.GetTaskId().GetVersion(), }, NodeExecutionId: &core.NodeExecutionIdentifier{ ExecutionId: &core.WorkflowExecutionIdentifier{ @@ -266,7 +266,7 @@ func TestGetSourceFromMetadata(t *testing.T) { RetryAttempt: 0, }}, // Completely available - {"latest", args{datasetMd: GetDatasetMetadataForSource(&tID).KeyMap, artifactMd: GetArtifactMetadataForSource(&tID).KeyMap, currentID: currentTaskID}, &tID}, + {"latest", args{datasetMd: GetDatasetMetadataForSource(&tID).GetKeyMap(), artifactMd: GetArtifactMetadataForSource(&tID).GetKeyMap(), currentID: currentTaskID}, &tID}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -325,11 +325,11 @@ func TestEventCatalogMetadata(t *testing.T) { func TestDatasetIDToIdentifier(t *testing.T) { id := DatasetIDToIdentifier(&datacatalog.DatasetID{Project: "p", Domain: "d", Name: "n", Version: "v"}) - assert.Equal(t, core.ResourceType_DATASET, id.ResourceType) - assert.Equal(t, "n", id.Name) - assert.Equal(t, "p", id.Project) - assert.Equal(t, "d", id.Domain) - assert.Equal(t, "v", id.Version) + assert.Equal(t, core.ResourceType_DATASET, id.GetResourceType()) + assert.Equal(t, "n", id.GetName()) + assert.Equal(t, "p", id.GetProject()) + assert.Equal(t, "d", id.GetDomain()) + assert.Equal(t, "v", id.GetVersion()) } func TestGenerateTaskOutputsFromArtifact_IDLNotFound(t *testing.T) { diff --git a/flytepropeller/pkg/controller/nodes/common/utils.go b/flytepropeller/pkg/controller/nodes/common/utils.go index dd16b53f3a..839be0c99f 100644 --- a/flytepropeller/pkg/controller/nodes/common/utils.go +++ b/flytepropeller/pkg/controller/nodes/common/utils.go @@ -76,7 +76,7 @@ func 
GetTargetEntity(ctx context.Context, nCtx interfaces.NodeExecutionContext) // This doesn't feed a very important part of the node execution event, swallow it for now. logger.Errorf(ctx, "Failed to get task [%v] with error [%v]", taskID, err) } - targetEntity = taskID.CoreTask().Id + targetEntity = taskID.CoreTask().GetId() } return targetEntity } @@ -111,7 +111,7 @@ func OffloadLargeLiteral(ctx context.Context, datastore *storage.DataStore, data if literalSizeMB >= literalOffloadingConfig.MaxSizeInMBForOffloading { errString := fmt.Sprintf("Literal size [%d] MB is larger than the max size [%d] MB for offloading", literalSizeMB, literalOffloadingConfig.MaxSizeInMBForOffloading) logger.Errorf(ctx, errString) - return fmt.Errorf(errString) + return fmt.Errorf(errString) //nolint:govet,staticcheck } if literalSizeMB < literalOffloadingConfig.MinSizeInMBForOffloading { logger.Debugf(ctx, "Literal size [%d] MB is smaller than the min size [%d] MB for offloading", literalSizeMB, literalOffloadingConfig.MinSizeInMBForOffloading) @@ -122,7 +122,7 @@ func OffloadLargeLiteral(ctx context.Context, datastore *storage.DataStore, data if inferredType == nil { errString := "Failed to determine literal type for offloaded literal" logger.Errorf(ctx, errString) - return fmt.Errorf(errString) + return fmt.Errorf(errString) //nolint:govet,staticcheck } // offload the literal @@ -145,7 +145,7 @@ func OffloadLargeLiteral(ctx context.Context, datastore *storage.DataStore, data toBeOffloaded.Value = &idlcore.Literal_OffloadedMetadata{ OffloadedMetadata: &idlcore.LiteralOffloadedMetadata{ Uri: dataReference.String(), - SizeBytes: uint64(literalSizeBytes), + SizeBytes: uint64(literalSizeBytes), // #nosec G115 InferredType: inferredType, }, } diff --git a/flytepropeller/pkg/controller/nodes/common/utils_test.go b/flytepropeller/pkg/controller/nodes/common/utils_test.go index 875ede858b..bde50c8040 100644 --- a/flytepropeller/pkg/controller/nodes/common/utils_test.go +++ b/flytepropeller/pkg/controller/nodes/common/utils_test.go @@ -147,8 +147,8 @@ func TestOffloadLargeLiteral(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "foo/bar", toBeOffloaded.GetOffloadedMetadata().GetUri()) assert.Equal(t, uint64(6), toBeOffloaded.GetOffloadedMetadata().GetSizeBytes()) - assert.Equal(t, inferredType.GetSimple(), toBeOffloaded.GetOffloadedMetadata().InferredType.GetSimple()) - assert.Equal(t, base64.RawURLEncoding.EncodeToString(expectedLiteralDigest), toBeOffloaded.Hash) + assert.Equal(t, inferredType.GetSimple(), toBeOffloaded.GetOffloadedMetadata().GetInferredType().GetSimple()) + assert.Equal(t, base64.RawURLEncoding.EncodeToString(expectedLiteralDigest), toBeOffloaded.GetHash()) }) t.Run("offload successful with valid size and hash passed in", func(t *testing.T) { @@ -175,7 +175,7 @@ func TestOffloadLargeLiteral(t *testing.T) { } err := OffloadLargeLiteral(ctx, datastore, dataReference, toBeOffloaded, literalOffloadingConfig) assert.NoError(t, err) - assert.Equal(t, "hash", toBeOffloaded.Hash) + assert.Equal(t, "hash", toBeOffloaded.GetHash()) }) t.Run("offload fails with size larger than max", func(t *testing.T) { diff --git a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow.go b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow.go index 95e8f4c0bb..de0108d4dc 100644 --- a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow.go +++ b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow.go @@ -46,8 +46,8 @@ func setEphemeralNodeExecutionStatusAttributes(ctx context.Context, 
djSpec *core // We keep track of the original node ids because that's where flytekit inputs are written to in the case of legacy // map tasks. The modern map tasks do not write input files any longer and this entire piece of code can be removed. parentNodeID := nCtx.NodeID() - for _, node := range djSpec.Nodes { - nodeID := node.Id + for _, node := range djSpec.GetNodes() { + nodeID := node.GetId() var subNodeStatus v1alpha1.ExecutableNodeStatus newID, err := hierarchicalNodeID(parentNodeID, currentAttemptStr, nodeID) if err != nil { @@ -98,16 +98,16 @@ func (d dynamicNodeTaskNodeHandler) buildDynamicWorkflowTemplate(ctx context.Con return nil, errors.Wrapf("TaskReadFailed", err, "Failed to find task [%v].", nCtx.TaskReader().GetTaskID()) } - for _, t := range djSpec.Tasks { + for _, t := range djSpec.GetTasks() { if t.GetContainer() != nil && parentTask.GetContainer() != nil { - t.GetContainer().Config = append(t.GetContainer().Config, parentTask.GetContainer().Config...) + t.GetContainer().Config = append(t.GetContainer().Config, parentTask.GetContainer().GetConfig()...) } } } if nCtx.ExecutionContext().GetEventVersion() == v1alpha1.EventVersion0 { - for _, o := range djSpec.Outputs { - err = updateBindingNodeIDsWithLineage(parentNodeID, currentAttemptStr, o.Binding) + for _, o := range djSpec.GetOutputs() { + err = updateBindingNodeIDsWithLineage(parentNodeID, currentAttemptStr, o.GetBinding()) if err != nil { return nil, err } @@ -115,14 +115,14 @@ func (d dynamicNodeTaskNodeHandler) buildDynamicWorkflowTemplate(ctx context.Con } return &core.WorkflowTemplate{ Id: &core.Identifier{ - Project: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId().Project, - Domain: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId().Domain, - Name: fmt.Sprintf(dynamicWfNameTemplate, nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId), + Project: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId().GetProject(), + Domain: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId().GetDomain(), + Name: fmt.Sprintf(dynamicWfNameTemplate, nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId()), Version: rand.String(10), ResourceType: core.ResourceType_WORKFLOW, }, - Nodes: djSpec.Nodes, - Outputs: djSpec.Outputs, + Nodes: djSpec.GetNodes(), + Outputs: djSpec.GetOutputs(), Interface: iface, }, nil } @@ -228,14 +228,14 @@ func (d dynamicNodeTaskNodeHandler) buildDynamicWorkflow(ctx context.Context, nC return nil, nil, errors.Wrapf(utils.ErrorCodeSystem, err, "failed to build dynamic workflow template") } - compiledTasks, err := compileTasks(ctx, djSpec.Tasks) + compiledTasks, err := compileTasks(ctx, djSpec.GetTasks()) if err != nil { return nil, nil, errors.Wrapf(utils.ErrorCodeUser, err, "failed to compile dynamic tasks") } // Get the requirements, that is, a list of all the task IDs and the launch plan IDs that will be called as part of this dynamic task. // The definition of these will need to be fetched from Admin (in order to get the interface). - requirements, err := compiler.GetRequirements(wf, djSpec.Subworkflows) + requirements, err := compiler.GetRequirements(wf, djSpec.GetSubworkflows()) if err != nil { return nil, nil, errors.Wrapf(utils.ErrorCodeUser, err, "failed to Get requirements for subworkflows") } @@ -251,7 +251,7 @@ func (d dynamicNodeTaskNodeHandler) buildDynamicWorkflow(ctx context.Context, nC // See https://github.com/flyteorg/flyte/issues/219 for more information. 
var closure *core.CompiledWorkflowClosure - closure, err = compiler.CompileWorkflow(wf, djSpec.Subworkflows, compiledTasks, launchPlanInterfaces) + closure, err = compiler.CompileWorkflow(wf, djSpec.GetSubworkflows(), compiledTasks, launchPlanInterfaces) if err != nil { return nil, nil, errors.Wrapf(utils.ErrorCodeUser, err, "malformed dynamic workflow") } @@ -348,10 +348,10 @@ func (d dynamicNodeTaskNodeHandler) getLaunchPlanInterfaces(ctx context.Context, logger.Debugf(ctx, "Error fetching launch plan definition from admin") if launchplan.IsNotFound(err) || launchplan.IsUserError(err) { return nil, errors.Wrapf(utils.ErrorCodeUser, err, "incorrectly specified launchplan %s:%s:%s:%s", - id.Project, id.Domain, id.Name, id.Version) + id.GetProject(), id.GetDomain(), id.GetName(), id.GetVersion()) } return nil, errors.Wrapf(utils.ErrorCodeSystem, err, "unable to retrieve launchplan information %s:%s:%s:%s", - id.Project, id.Domain, id.Name, id.Version) + id.GetProject(), id.GetDomain(), id.GetName(), id.GetVersion()) } launchPlanInterfaces[idx] = compiler.NewLaunchPlanInterfaceProvider(lp) } diff --git a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go index 3cb27dd65f..ec20c14cd0 100644 --- a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go +++ b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go @@ -532,7 +532,7 @@ func Test_dynamicNodeHandler_buildContextualDynamicWorkflow_withLaunchPlans(t *t } mockLPLauncher := &mocks5.Reader{} mockLPLauncher.OnGetLaunchPlanMatch(mock.Anything, mock.MatchedBy(func(id *core.Identifier) bool { - return lpID.Name == id.Name && lpID.Domain == id.Domain && lpID.Project == id.Project && lpID.ResourceType == id.ResourceType + return lpID.GetName() == id.GetName() && lpID.GetDomain() == id.GetDomain() && lpID.GetProject() == id.GetProject() && lpID.GetResourceType() == id.GetResourceType() })).Return(&admin.LaunchPlan{ Id: lpID, Closure: &admin.LaunchPlanClosure{ diff --git a/flytepropeller/pkg/controller/nodes/dynamic/handler.go b/flytepropeller/pkg/controller/nodes/dynamic/handler.go index e23f145bb3..d8977eacdb 100644 --- a/flytepropeller/pkg/controller/nodes/dynamic/handler.go +++ b/flytepropeller/pkg/controller/nodes/dynamic/handler.go @@ -103,9 +103,9 @@ func (d dynamicNodeTaskNodeHandler) produceDynamicWorkflow(ctx context.Context, return handler.Transition{}, handler.DynamicNodeState{}, err } taskNodeInfoMetadata := &event.TaskNodeMetadata{} - if dCtx.subWorkflowClosure != nil && dCtx.subWorkflowClosure.Primary != nil && dCtx.subWorkflowClosure.Primary.Template != nil { + if dCtx.subWorkflowClosure != nil && dCtx.subWorkflowClosure.GetPrimary() != nil && dCtx.subWorkflowClosure.GetPrimary().GetTemplate() != nil { taskNodeInfoMetadata.DynamicWorkflow = &event.DynamicWorkflowNodeMetadata{ - Id: dCtx.subWorkflowClosure.Primary.Template.Id, + Id: dCtx.subWorkflowClosure.GetPrimary().GetTemplate().GetId(), CompiledWorkflow: dCtx.subWorkflowClosure, DynamicJobSpecUri: dCtx.dynamicJobSpecURI, } diff --git a/flytepropeller/pkg/controller/nodes/dynamic/utils.go b/flytepropeller/pkg/controller/nodes/dynamic/utils.go index 690cbe06a1..6a75e551c7 100644 --- a/flytepropeller/pkg/controller/nodes/dynamic/utils.go +++ b/flytepropeller/pkg/controller/nodes/dynamic/utils.go @@ -21,7 +21,7 @@ func underlyingInterface(ctx context.Context, taskReader interfaces.TaskReader) } if t.GetInterface() != nil { - iface.Outputs = t.GetInterface().Outputs 
+ iface.Outputs = t.GetInterface().GetOutputs() } return iface, nil } @@ -31,21 +31,21 @@ func hierarchicalNodeID(parentNodeID, retryAttempt, nodeID string) (string, erro } func updateBindingNodeIDsWithLineage(parentNodeID, retryAttempt string, binding *core.BindingData) (err error) { - switch b := binding.Value.(type) { + switch b := binding.GetValue().(type) { case *core.BindingData_Promise: - b.Promise.NodeId, err = hierarchicalNodeID(parentNodeID, retryAttempt, b.Promise.NodeId) + b.Promise.NodeId, err = hierarchicalNodeID(parentNodeID, retryAttempt, b.Promise.GetNodeId()) if err != nil { return err } case *core.BindingData_Collection: - for _, item := range b.Collection.Bindings { + for _, item := range b.Collection.GetBindings() { err = updateBindingNodeIDsWithLineage(parentNodeID, retryAttempt, item) if err != nil { return err } } case *core.BindingData_Map: - for _, item := range b.Map.Bindings { + for _, item := range b.Map.GetBindings() { err = updateBindingNodeIDsWithLineage(parentNodeID, retryAttempt, item) if err != nil { return err @@ -60,7 +60,7 @@ func compileTasks(_ context.Context, tasks []*core.TaskTemplate) ([]*core.Compil compiledTasks := make([]*core.CompiledTask, 0, len(tasks)) visitedTasks := sets.NewString() for _, t := range tasks { - if visitedTasks.Has(t.Id.String()) { + if visitedTasks.Has(t.GetId().String()) { continue } @@ -70,7 +70,7 @@ func compileTasks(_ context.Context, tasks []*core.TaskTemplate) ([]*core.Compil } compiledTasks = append(compiledTasks, ct) - visitedTasks.Insert(t.Id.String()) + visitedTasks.Insert(t.GetId().String()) } return compiledTasks, nil diff --git a/flytepropeller/pkg/controller/nodes/dynamic/utils_test.go b/flytepropeller/pkg/controller/nodes/dynamic/utils_test.go index 6afdd487b9..ecc1904c49 100644 --- a/flytepropeller/pkg/controller/nodes/dynamic/utils_test.go +++ b/flytepropeller/pkg/controller/nodes/dynamic/utils_test.go @@ -62,5 +62,5 @@ func TestUnderlyingInterface(t *testing.T) { iface, err = underlyingInterface(context.TODO(), tr) assert.NoError(t, err) assert.NotNil(t, iface) - assert.Nil(t, iface.Outputs) + assert.Nil(t, iface.GetOutputs()) } diff --git a/flytepropeller/pkg/controller/nodes/executor.go b/flytepropeller/pkg/controller/nodes/executor.go index b25ad64fb6..04adfc5d54 100644 --- a/flytepropeller/pkg/controller/nodes/executor.go +++ b/flytepropeller/pkg/controller/nodes/executor.go @@ -522,19 +522,19 @@ func (c *nodeExecutor) RecordTransitionLatency(ctx context.Context, dag executor func (c *nodeExecutor) recoverInputs(ctx context.Context, nCtx interfaces.NodeExecutionContext, recovered *admin.NodeExecution, recoveredData *admin.NodeExecutionGetDataResponse) (*core.LiteralMap, error) { - nodeInputs := recoveredData.FullInputs + nodeInputs := recoveredData.GetFullInputs() if nodeInputs != nil { if err := c.store.WriteProtobuf(ctx, nCtx.InputReader().GetInputPath(), storage.Options{}, nodeInputs); err != nil { c.metrics.InputsWriteFailure.Inc(ctx) logger.Errorf(ctx, "Failed to move recovered inputs for Node. Error [%v]. InputsFile [%s]", err, nCtx.InputReader().GetInputPath()) return nil, errors.Wrapf(errors.StorageError, nCtx.NodeID(), err, "Failed to store inputs for Node. InputsFile [%s]", nCtx.InputReader().GetInputPath()) } - } else if len(recovered.InputUri) > 0 { + } else if len(recovered.GetInputUri()) > 0 { // If the inputs are too large they won't be returned inline in the RecoverData call. We must fetch them before copying them. 
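For illustration outside the diff: recoverInputs prefers the inline FullInputs payload and only falls back to reading the InputUri from the data store when the inputs were too large to return inline. A minimal sketch of that fallback, with invented stand-in types rather than the real core.LiteralMap and storage APIs:

package main

import (
	"errors"
	"fmt"
)

// LiteralMap stands in for core.LiteralMap in this sketch.
type LiteralMap struct{ Literals map[string]string }

// recoveredData mirrors the two ways inputs can come back: inline or by URI.
type recoveredData struct {
	FullInputs *LiteralMap // inline payload; nil when the inputs were offloaded
	InputURI   string      // blob-store reference; empty when inputs were inline
}

// blobStore is a stand-in for the protobuf-aware data store.
type blobStore interface {
	ReadProtobuf(uri string, into *LiteralMap) error
}

func resolveInputs(s blobStore, rec recoveredData) (*LiteralMap, error) {
	if rec.FullInputs != nil {
		return rec.FullInputs, nil // small enough to have been returned inline
	}
	if rec.InputURI == "" {
		return nil, errors.New("recovered node has no inputs recorded")
	}
	inputs := &LiteralMap{}
	if err := s.ReadProtobuf(rec.InputURI, inputs); err != nil {
		return nil, fmt.Errorf("reading inputs from %s: %w", rec.InputURI, err)
	}
	return inputs, nil
}

func main() {
	inline := recoveredData{FullInputs: &LiteralMap{Literals: map[string]string{"x": "1"}}}
	got, err := resolveInputs(nil, inline) // the store is not touched on the inline path
	fmt.Println(got.Literals, err)
}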
nodeInputs = &core.LiteralMap{} - if recoveredData.FullInputs == nil { - if err := c.store.ReadProtobuf(ctx, storage.DataReference(recovered.InputUri), nodeInputs); err != nil { - return nil, errors.Wrapf(errors.InputsNotFoundError, nCtx.NodeID(), err, "failed to read data from dataDir [%v].", recovered.InputUri) + if recoveredData.GetFullInputs() == nil { + if err := c.store.ReadProtobuf(ctx, storage.DataReference(recovered.GetInputUri()), nodeInputs); err != nil { + return nil, errors.Wrapf(errors.InputsNotFoundError, nCtx.NodeID(), err, "failed to read data from dataDir [%v].", recovered.GetInputUri()) } } @@ -549,11 +549,11 @@ func (c *nodeExecutor) recoverInputs(ctx context.Context, nCtx interfaces.NodeEx } func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.NodeExecutionContext) (handler.PhaseInfo, error) { - fullyQualifiedNodeID := nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId + fullyQualifiedNodeID := nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId() if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 { // compute fully qualified node id (prefixed with parent id and retry attempt) to ensure uniqueness var err error - fullyQualifiedNodeID, err = common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId) + fullyQualifiedNodeID, err = common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId()) if err != nil { return handler.PhaseInfoUndefined, err } @@ -572,13 +572,13 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node logger.Warnf(ctx, "call to recover node [%+v] returned no error but also no node", nCtx.NodeExecutionMetadata().GetNodeExecutionID()) return handler.PhaseInfoUndefined, nil } - if recovered.Closure == nil { + if recovered.GetClosure() == nil { logger.Warnf(ctx, "Fetched node execution [%+v] data but was missing closure. Will not attempt to recover", nCtx.NodeExecutionMetadata().GetNodeExecutionID()) return handler.PhaseInfoUndefined, nil } // A recoverable node execution should always be in a terminal phase - switch recovered.Closure.Phase { + switch recovered.GetClosure().GetPhase() { case core.NodeExecution_SKIPPED: return handler.PhaseInfoUndefined, nil case core.NodeExecution_SUCCEEDED: @@ -588,9 +588,9 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node default: // The node execution may be partially recoverable through intra task checkpointing. Save the checkpoint // uri in the task node state to pass to the task handler later on. 
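For illustration outside the diff: the next hunk recovers the checkpoint URI by type-asserting the closure's oneof target metadata (NodeExecutionClosure_TaskNodeMetadata). In Go, each oneof case is wrapped in its own struct behind a small interface, so selecting the populated case is a type assertion or switch. A stand-in sketch (types invented here, not the flyteidl-generated ones):

package main

import "fmt"

// isTargetMetadata plays the role of the generated oneof marker interface.
type isTargetMetadata interface{ isTargetMetadata() }

type TaskNodeMetadata struct{ CheckpointURI string }
type WorkflowNodeMetadata struct{ ExecutionName string }

// One wrapper struct per oneof case, as protoc-gen-go generates them.
type closureTaskNode struct{ TaskNodeMetadata *TaskNodeMetadata }
type closureWorkflowNode struct{ WorkflowNodeMetadata *WorkflowNodeMetadata }

func (*closureTaskNode) isTargetMetadata()     {}
func (*closureWorkflowNode) isTargetMetadata() {}

type Closure struct{ TargetMetadata isTargetMetadata }

// checkpointURI returns the checkpoint location only when the task-node case
// of the oneof is populated.
func checkpointURI(c *Closure) (string, bool) {
	if md, ok := c.TargetMetadata.(*closureTaskNode); ok && md.TaskNodeMetadata != nil {
		return md.TaskNodeMetadata.CheckpointURI, true
	}
	return "", false
}

func main() {
	c := &Closure{TargetMetadata: &closureTaskNode{
		TaskNodeMetadata: &TaskNodeMetadata{CheckpointURI: "s3://bucket/checkpoints/n0"},
	}}
	fmt.Println(checkpointURI(c))
}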
- if metadata, ok := recovered.Closure.TargetMetadata.(*admin.NodeExecutionClosure_TaskNodeMetadata); ok { + if metadata, ok := recovered.GetClosure().GetTargetMetadata().(*admin.NodeExecutionClosure_TaskNodeMetadata); ok { state := nCtx.NodeStateReader().GetTaskNodeState() - state.PreviousNodeExecutionCheckpointURI = storage.DataReference(metadata.TaskNodeMetadata.CheckpointUri) + state.PreviousNodeExecutionCheckpointURI = storage.DataReference(metadata.TaskNodeMetadata.GetCheckpointUri()) err = nCtx.NodeStateWriter().PutTaskNodeState(state) if err != nil { logger.Warnf(ctx, "failed to save recovered checkpoint uri for [%+v]: [%+v]", @@ -601,7 +601,7 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node // if this node is a dynamic task we attempt to recover the compiled workflow from instances where the parent // task succeeded but the dynamic task did not complete. this is important to ensure correctness since node ids // within the compiled closure may not be generated deterministically. - if recovered.Metadata != nil && recovered.Metadata.IsDynamic && len(recovered.Closure.DynamicJobSpecUri) > 0 { + if recovered.GetMetadata() != nil && recovered.GetMetadata().GetIsDynamic() && len(recovered.GetClosure().GetDynamicJobSpecUri()) > 0 { // recover node inputs recoveredData, err := c.recoveryClient.RecoverNodeExecutionData(ctx, nCtx.ExecutionContext().GetExecutionConfig().RecoveryExecution.WorkflowExecutionIdentifier, fullyQualifiedNodeID) @@ -619,7 +619,7 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node return handler.PhaseInfoUndefined, err } - dynamicJobSpecReference := storage.DataReference(recovered.Closure.DynamicJobSpecUri) + dynamicJobSpecReference := storage.DataReference(recovered.GetClosure().GetDynamicJobSpecUri()) if err := nCtx.DataStore().CopyRaw(ctx, dynamicJobSpecReference, f.GetLoc(), storage.Options{}); err != nil { return handler.PhaseInfoUndefined, errors.Wrapf(errors.StorageError, nCtx.NodeID(), err, "failed to store dynamic job spec for node. 
source file [%s] destination file [%s]", dynamicJobSpecReference, f.GetLoc()) @@ -635,7 +635,7 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node return handler.PhaseInfoRunning(&handler.ExecutionInfo{}), nil } - logger.Debugf(ctx, "Node [%+v] phase [%v] is not recoverable", nCtx.NodeExecutionMetadata().GetNodeExecutionID(), recovered.Closure.Phase) + logger.Debugf(ctx, "Node [%+v] phase [%v] is not recoverable", nCtx.NodeExecutionMetadata().GetNodeExecutionID(), recovered.GetClosure().GetPhase()) return handler.PhaseInfoUndefined, nil } @@ -662,13 +662,13 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node // Similarly, copy outputs' reference so := storage.Options{} var outputs = &core.LiteralMap{} - if recoveredData.FullOutputs != nil { - outputs = recoveredData.FullOutputs - } else if recovered.Closure.GetOutputData() != nil { - outputs = recovered.Closure.GetOutputData() - } else if len(recovered.Closure.GetOutputUri()) > 0 { - if err := c.store.ReadProtobuf(ctx, storage.DataReference(recovered.Closure.GetOutputUri()), outputs); err != nil { - return handler.PhaseInfoUndefined, errors.Wrapf(errors.InputsNotFoundError, nCtx.NodeID(), err, "failed to read output data [%v].", recovered.Closure.GetOutputUri()) + if recoveredData.GetFullOutputs() != nil { + outputs = recoveredData.GetFullOutputs() + } else if recovered.GetClosure().GetOutputData() != nil { + outputs = recovered.GetClosure().GetOutputData() + } else if len(recovered.GetClosure().GetOutputUri()) > 0 { + if err := c.store.ReadProtobuf(ctx, storage.DataReference(recovered.GetClosure().GetOutputUri()), outputs); err != nil { + return handler.PhaseInfoUndefined, errors.Wrapf(errors.InputsNotFoundError, nCtx.NodeID(), err, "failed to read output data [%v].", recovered.GetClosure().GetOutputUri()) } } else { logger.Debugf(ctx, "No outputs found for recovered node [%+v]", nCtx.NodeExecutionMetadata().GetNodeExecutionID()) @@ -679,7 +679,7 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node OutputURI: outputFile, } - deckFile := storage.DataReference(recovered.Closure.GetDeckUri()) + deckFile := storage.DataReference(recovered.GetClosure().GetDeckUri()) if len(deckFile) > 0 { metadata, err := nCtx.DataStore().Head(ctx, deckFile) if err != nil { @@ -702,24 +702,24 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node OutputInfo: oi, } - if recovered.Closure.GetTaskNodeMetadata() != nil { + if recovered.GetClosure().GetTaskNodeMetadata() != nil { taskNodeInfo := &handler.TaskNodeInfo{ TaskNodeMetadata: &event.TaskNodeMetadata{ - CatalogKey: recovered.Closure.GetTaskNodeMetadata().CatalogKey, - CacheStatus: recovered.Closure.GetTaskNodeMetadata().CacheStatus, + CatalogKey: recovered.GetClosure().GetTaskNodeMetadata().GetCatalogKey(), + CacheStatus: recovered.GetClosure().GetTaskNodeMetadata().GetCacheStatus(), }, } - if recoveredData.DynamicWorkflow != nil { + if recoveredData.GetDynamicWorkflow() != nil { taskNodeInfo.TaskNodeMetadata.DynamicWorkflow = &event.DynamicWorkflowNodeMetadata{ - Id: recoveredData.DynamicWorkflow.Id, - CompiledWorkflow: recoveredData.DynamicWorkflow.CompiledWorkflow, + Id: recoveredData.GetDynamicWorkflow().GetId(), + CompiledWorkflow: recoveredData.GetDynamicWorkflow().GetCompiledWorkflow(), } } info.TaskNodeInfo = taskNodeInfo - } else if recovered.Closure.GetWorkflowNodeMetadata() != nil { + } else if recovered.GetClosure().GetWorkflowNodeMetadata() != nil { logger.Warnf(ctx, 
"Attempted to recover node") info.WorkflowNodeInfo = &handler.WorkflowNodeInfo{ - LaunchedWorkflowID: recovered.Closure.GetWorkflowNodeMetadata().ExecutionId, + LaunchedWorkflowID: recovered.GetClosure().GetWorkflowNodeMetadata().GetExecutionId(), } } return handler.PhaseInfoRecovered(info), nil @@ -765,7 +765,7 @@ func (c *nodeExecutor) preExecute(ctx context.Context, dag executors.DAGStructur } if nodeInputs != nil { - p := common.CheckOffloadingCompat(ctx, nCtx, nodeInputs.Literals, node, c.literalOffloadingConfig) + p := common.CheckOffloadingCompat(ctx, nCtx, nodeInputs.GetLiterals(), node, c.literalOffloadingConfig) if p != nil { return *p, nil } @@ -809,7 +809,7 @@ func (c *nodeExecutor) isEligibleForRetry(nCtx interfaces.NodeExecutionContext, if config.GetConfig().NodeConfig.IgnoreRetryCause { currentAttempt = nodeStatus.GetAttempts() + 1 } else { - if err.Kind == core.ExecutionError_SYSTEM { + if err.GetKind() == core.ExecutionError_SYSTEM { currentAttempt = nodeStatus.GetSystemFailures() maxAttempts = c.maxNodeRetriesForSystemFailures isEligible = currentAttempt < c.maxNodeRetriesForSystemFailures @@ -818,9 +818,9 @@ func (c *nodeExecutor) isEligibleForRetry(nCtx interfaces.NodeExecutionContext, currentAttempt = (nodeStatus.GetAttempts() + 1) - nodeStatus.GetSystemFailures() } - maxAttempts = uint32(config.GetConfig().NodeConfig.DefaultMaxAttempts) + maxAttempts = uint32(config.GetConfig().NodeConfig.DefaultMaxAttempts) // #nosec G115 if nCtx.Node().GetRetryStrategy() != nil && nCtx.Node().GetRetryStrategy().MinAttempts != nil && *nCtx.Node().GetRetryStrategy().MinAttempts != 1 { - maxAttempts = uint32(*nCtx.Node().GetRetryStrategy().MinAttempts) + maxAttempts = uint32(*nCtx.Node().GetRetryStrategy().MinAttempts) // #nosec G115 } isEligible = currentAttempt < maxAttempts return @@ -864,8 +864,8 @@ func (c *nodeExecutor) execute(ctx context.Context, h interfaces.NodeHandler, nC if !isEligible { return handler.PhaseInfoFailure( core.ExecutionError_USER, - fmt.Sprintf("RetriesExhausted|%s", phase.GetErr().Code), - fmt.Sprintf("[%d/%d] currentAttempt done. Last Error: %s::%s", currentAttempt, maxAttempts, phase.GetErr().Kind.String(), phase.GetErr().Message), + fmt.Sprintf("RetriesExhausted|%s", phase.GetErr().GetCode()), + fmt.Sprintf("[%d/%d] currentAttempt done. 
Last Error: %s::%s", currentAttempt, maxAttempts, phase.GetErr().GetKind().String(), phase.GetErr().GetMessage()), phase.GetInfo(), ), nil } @@ -894,11 +894,11 @@ func (c *nodeExecutor) Abort(ctx context.Context, h interfaces.NodeHandler, nCtx // only send event if this is the final transition for this node if finalTransition { nodeExecutionID := &core.NodeExecutionIdentifier{ - ExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().ExecutionId, - NodeId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId, + ExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId(), + NodeId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId(), } if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 { - currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nodeExecutionID.NodeId) + currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nodeExecutionID.GetNodeId()) if err != nil { return err } @@ -1483,7 +1483,7 @@ func NewExecutor(ctx context.Context, nodeConfig config.NodeConfig, store *stora eventConfig: eventConfig, literalOffloadingConfig: literalOffloadingConfig, interruptibleFailureThreshold: nodeConfig.InterruptibleFailureThreshold, - maxNodeRetriesForSystemFailures: uint32(nodeConfig.MaxNodeRetriesOnSystemFailures), + maxNodeRetriesForSystemFailures: uint32(nodeConfig.MaxNodeRetriesOnSystemFailures), // #nosec G115 metrics: metrics, nodeRecorder: events.NewNodeEventRecorder(eventSink, nodeScope, store), outputResolver: NewRemoteFileOutputResolver(store), diff --git a/flytepropeller/pkg/controller/nodes/executor_test.go b/flytepropeller/pkg/controller/nodes/executor_test.go index 35ab105623..329d52540d 100644 --- a/flytepropeller/pkg/controller/nodes/executor_test.go +++ b/flytepropeller/pkg/controller/nodes/executor_test.go @@ -779,7 +779,7 @@ func TestNodeExecutor_RecursiveNodeHandler_Recurse(t *testing.T) { evRecorder := &eventMocks.NodeEventRecorder{} evRecorder.OnRecordNodeEventMatch(mock.Anything, mock.MatchedBy(func(ev *event.NodeExecutionEvent) bool { assert.NotNil(t, ev) - assert.Equal(t, test.eventPhase, ev.Phase) + assert.Equal(t, test.eventPhase, ev.GetPhase()) called = true return true }), mock.Anything).Return(nil) @@ -893,7 +893,7 @@ func TestNodeExecutor_RecursiveNodeHandler_Recurse(t *testing.T) { evRecorder := &eventMocks.NodeEventRecorder{} evRecorder.OnRecordNodeEventMatch(mock.Anything, mock.MatchedBy(func(ev *event.NodeExecutionEvent) bool { assert.NotNil(t, ev) - assert.Equal(t, test.eventPhase, ev.Phase) + assert.Equal(t, test.eventPhase, ev.GetPhase()) called = true return true }), mock.Anything).Return(nil) @@ -939,7 +939,7 @@ func TestNodeExecutor_RecursiveNodeHandler_Recurse(t *testing.T) { } else { assert.Nil(t, s.Err) } - assert.Equal(t, uint32(test.attempts), mockNodeStatus.GetAttempts()) + assert.Equal(t, uint32(test.attempts), mockNodeStatus.GetAttempts()) // #nosec G115 assert.Equal(t, test.eventRecorded, called, "event recording expected: %v, but got %v", test.eventRecorded, called) }) } @@ -1770,18 +1770,18 @@ func TestNodeExecutionEventStartNode(t *testing.T) { }, subWfID) assert.NoError(t, err) - assert.Equal(t, "start-node", ev.Id.NodeId) - assert.Equal(t, execID, ev.Id.ExecutionId) - assert.Empty(t, ev.SpecNodeId) - assert.Nil(t, ev.ParentNodeMetadata) - assert.Equal(t, tID, ev.ParentTaskMetadata.Id) - assert.Empty(t, ev.NodeName) - assert.Empty(t, ev.RetryGroup) + assert.Equal(t, "start-node", ev.GetId().GetNodeId()) + 
assert.Equal(t, execID, ev.GetId().GetExecutionId()) + assert.Empty(t, ev.GetSpecNodeId()) + assert.Nil(t, ev.GetParentNodeMetadata()) + assert.Equal(t, tID, ev.GetParentTaskMetadata().GetId()) + assert.Empty(t, ev.GetNodeName()) + assert.Empty(t, ev.GetRetryGroup()) assert.Equal(t, "dummy://dummyOutUrl/outputs.pb", - ev.OutputResult.(*event.NodeExecutionEvent_OutputUri).OutputUri) - assert.Equal(t, ev.ProducerId, testClusterID) + ev.GetOutputResult().(*event.NodeExecutionEvent_OutputUri).OutputUri) + assert.Equal(t, ev.GetProducerId(), testClusterID) assert.Equal(t, subWfID, ev.GetTargetEntity()) - assert.Nil(t, ev.InputValue) + assert.Nil(t, ev.GetInputValue()) } func TestNodeExecutionEventV0(t *testing.T) { @@ -1817,14 +1817,14 @@ func TestNodeExecutionEventV0(t *testing.T) { RawOutputPolicy: config.RawOutputPolicyReference, }, nil) assert.NoError(t, err) - assert.Equal(t, "n1", ev.Id.NodeId) - assert.Equal(t, execID, ev.Id.ExecutionId) - assert.Empty(t, ev.SpecNodeId) - assert.Nil(t, ev.ParentNodeMetadata) - assert.Equal(t, tID, ev.ParentTaskMetadata.Id) - assert.Empty(t, ev.NodeName) - assert.Empty(t, ev.RetryGroup) - assert.Empty(t, ev.TargetEntity) + assert.Equal(t, "n1", ev.GetId().GetNodeId()) + assert.Equal(t, execID, ev.GetId().GetExecutionId()) + assert.Empty(t, ev.GetSpecNodeId()) + assert.Nil(t, ev.GetParentNodeMetadata()) + assert.Equal(t, tID, ev.GetParentTaskMetadata().GetId()) + assert.Empty(t, ev.GetNodeName()) + assert.Empty(t, ev.GetRetryGroup()) + assert.Empty(t, ev.GetTargetEntity()) assert.Equal(t, "reference", ev.GetInputUri()) } @@ -1870,18 +1870,18 @@ func TestNodeExecutionEventV1(t *testing.T) { }, nil) assert.NoError(t, err) - assert.Equal(t, "np1-2-n1", eventOpt.Id.NodeId) - assert.Equal(t, execID, eventOpt.Id.ExecutionId) - assert.Equal(t, "id", eventOpt.SpecNodeId) + assert.Equal(t, "np1-2-n1", eventOpt.GetId().GetNodeId()) + assert.Equal(t, execID, eventOpt.GetId().GetExecutionId()) + assert.Equal(t, "id", eventOpt.GetSpecNodeId()) expectParentMetadata := event.ParentNodeExecutionMetadata{ NodeId: "np1", } - assert.Equal(t, expectParentMetadata, *eventOpt.ParentNodeMetadata) - assert.Nil(t, eventOpt.ParentTaskMetadata) - assert.Equal(t, "name", eventOpt.NodeName) - assert.Equal(t, "2", eventOpt.RetryGroup) + assert.True(t, proto.Equal(&expectParentMetadata, eventOpt.GetParentNodeMetadata())) + assert.Nil(t, eventOpt.GetParentTaskMetadata()) + assert.Equal(t, "name", eventOpt.GetNodeName()) + assert.Equal(t, "2", eventOpt.GetRetryGroup()) assert.True(t, proto.Equal(eventOpt.GetInputData(), inputs)) - assert.Empty(t, eventOpt.TargetEntity) + assert.Empty(t, eventOpt.GetTargetEntity()) assert.Equal(t, inputs, eventOpt.GetInputData()) } @@ -2326,8 +2326,8 @@ func TestRecover(t *testing.T) { }, CacheStatus: core.CatalogCacheStatus_CACHE_HIT, DynamicWorkflow: &event.DynamicWorkflowNodeMetadata{ - Id: dynamicWorkflow.Id, - CompiledWorkflow: dynamicWorkflow.CompiledWorkflow, + Id: dynamicWorkflow.GetId(), + CompiledWorkflow: dynamicWorkflow.GetCompiledWorkflow(), }, }, phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata)) }) diff --git a/flytepropeller/pkg/controller/nodes/gate/handler.go b/flytepropeller/pkg/controller/nodes/gate/handler.go index 00d2cb989f..c1308c8ed3 100644 --- a/flytepropeller/pkg/controller/nodes/gate/handler.go +++ b/flytepropeller/pkg/controller/nodes/gate/handler.go @@ -84,7 +84,7 @@ func (g *gateNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecut request := &admin.SignalGetOrCreateRequest{ Id: &core.SignalIdentifier{ 
ExecutionId: nCtx.ExecutionContext().GetExecutionID().WorkflowExecutionIdentifier, - SignalId: approveCondition.SignalId, + SignalId: approveCondition.GetSignalId(), }, Type: &core.LiteralType{ Type: &core.LiteralType_Simple{ @@ -99,10 +99,10 @@ func (g *gateNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecut } // if signal has value then check for approval - if signal.Value != nil && signal.Value.Value != nil { - approved, ok := getBoolean(signal.Value) + if signal.GetValue() != nil && signal.Value.Value != nil { + approved, ok := getBoolean(signal.GetValue()) if !ok { - errMsg := fmt.Sprintf("received a non-boolean approve signal value [%v]", signal.Value) + errMsg := fmt.Sprintf("received a non-boolean approve signal value [%v]", signal.GetValue()) return handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(core.ExecutionError_UNKNOWN, errors.RuntimeExecutionError, errMsg, nil)), nil } @@ -143,9 +143,9 @@ func (g *gateNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecut request := &admin.SignalGetOrCreateRequest{ Id: &core.SignalIdentifier{ ExecutionId: nCtx.ExecutionContext().GetExecutionID().WorkflowExecutionIdentifier, - SignalId: signalCondition.SignalId, + SignalId: signalCondition.GetSignalId(), }, - Type: signalCondition.Type, + Type: signalCondition.GetType(), } signal, err := g.signalClient.GetOrCreateSignal(ctx, request) @@ -154,10 +154,10 @@ func (g *gateNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecut } // if signal has value then write to output and transition to success - if signal.Value != nil && signal.Value.Value != nil { + if signal.GetValue() != nil && signal.Value.Value != nil { outputs := &core.LiteralMap{ Literals: map[string]*core.Literal{ - signalCondition.OutputVariableName: signal.Value, + signalCondition.GetOutputVariableName(): signal.GetValue(), }, } @@ -218,9 +218,9 @@ func New(eventConfig *config.EventConfig, signalClient service.SignalServiceClie } func getBoolean(literal *core.Literal) (bool, bool) { - if scalarValue, ok := literal.Value.(*core.Literal_Scalar); ok { - if primitiveValue, ok := scalarValue.Scalar.Value.(*core.Scalar_Primitive); ok { - if booleanValue, ok := primitiveValue.Primitive.Value.(*core.Primitive_Boolean); ok { + if scalarValue, ok := literal.GetValue().(*core.Literal_Scalar); ok { + if primitiveValue, ok := scalarValue.Scalar.GetValue().(*core.Scalar_Primitive); ok { + if booleanValue, ok := primitiveValue.Primitive.GetValue().(*core.Primitive_Boolean); ok { return booleanValue.Boolean, true } } diff --git a/flytepropeller/pkg/controller/nodes/handler/state.go b/flytepropeller/pkg/controller/nodes/handler/state.go index a7fa7bdf87..c3e35e67d7 100644 --- a/flytepropeller/pkg/controller/nodes/handler/state.go +++ b/flytepropeller/pkg/controller/nodes/handler/state.go @@ -48,11 +48,12 @@ type GateNodeState struct { } type ArrayNodeState struct { - Phase v1alpha1.ArrayNodePhase - TaskPhaseVersion uint32 - Error *core.ExecutionError - SubNodePhases bitarray.CompactArray - SubNodeTaskPhases bitarray.CompactArray - SubNodeRetryAttempts bitarray.CompactArray - SubNodeSystemFailures bitarray.CompactArray + Phase v1alpha1.ArrayNodePhase + TaskPhaseVersion uint32 + Error *core.ExecutionError + SubNodePhases bitarray.CompactArray + SubNodeTaskPhases bitarray.CompactArray + SubNodeRetryAttempts bitarray.CompactArray + SubNodeSystemFailures bitarray.CompactArray + SubNodeDeltaTimestamps bitarray.CompactArray } diff --git 
a/flytepropeller/pkg/controller/nodes/handler/transition_info.go b/flytepropeller/pkg/controller/nodes/handler/transition_info.go index c9af525cca..7e787a9424 100644 --- a/flytepropeller/pkg/controller/nodes/handler/transition_info.go +++ b/flytepropeller/pkg/controller/nodes/handler/transition_info.go @@ -173,7 +173,7 @@ func phaseInfoFailed(p EPhase, err *core.ExecutionError, info *ExecutionInfo) Ph } } - return phaseInfo(p, err, info, err.Message) + return phaseInfo(p, err, info, err.GetMessage()) } func PhaseInfoFailure(kind core.ExecutionError_ErrorKind, code, reason string, info *ExecutionInfo) PhaseInfo { diff --git a/flytepropeller/pkg/controller/nodes/handler/transition_info_test.go b/flytepropeller/pkg/controller/nodes/handler/transition_info_test.go index 883dbd5f45..91042fc588 100644 --- a/flytepropeller/pkg/controller/nodes/handler/transition_info_test.go +++ b/flytepropeller/pkg/controller/nodes/handler/transition_info_test.go @@ -110,8 +110,8 @@ func TestPhaseInfo(t *testing.T) { assert.Equal(t, EPhaseFailed, p.GetPhase()) assert.Equal(t, i, p.GetInfo()) if assert.NotNil(t, p.GetErr()) { - assert.Equal(t, "code", p.GetErr().Code) - assert.Equal(t, "reason", p.GetErr().Message) + assert.Equal(t, "code", p.GetErr().GetCode()) + assert.Equal(t, "reason", p.GetErr().GetMessage()) } assert.NotNil(t, p.GetOccurredAt()) }) @@ -141,8 +141,8 @@ func TestPhaseInfo(t *testing.T) { assert.Equal(t, EPhaseRetryableFailure, p.GetPhase()) assert.Equal(t, i, p.GetInfo()) if assert.NotNil(t, p.GetErr()) { - assert.Equal(t, "code", p.GetErr().Code) - assert.Equal(t, "reason", p.GetErr().Message) + assert.Equal(t, "code", p.GetErr().GetCode()) + assert.Equal(t, "reason", p.GetErr().GetMessage()) } assert.NotNil(t, p.GetOccurredAt()) }) diff --git a/flytepropeller/pkg/controller/nodes/node_exec_context.go b/flytepropeller/pkg/controller/nodes/node_exec_context.go index 7de31100c6..9721d2af6c 100644 --- a/flytepropeller/pkg/controller/nodes/node_exec_context.go +++ b/flytepropeller/pkg/controller/nodes/node_exec_context.go @@ -39,16 +39,16 @@ func (e eventRecorder) RecordTaskEvent(ctx context.Context, ev *event.TaskExecut if eventConfig.ErrorOnAlreadyExists { return err } - logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.Phase) + logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.GetPhase()) return nil } else if eventsErr.IsEventAlreadyInTerminalStateError(err) { - if IsTerminalTaskPhase(ev.Phase) { + if IsTerminalTaskPhase(ev.GetPhase()) { // Event is terminal and the stored value in flyteadmin is already terminal. This implies aborted case. So ignoring - logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.Phase) + logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.GetPhase()) return nil } - logger.Warningf(ctx, "Failed to record taskEvent in state: %s, error: %s", ev.Phase, err) - return errors.Wrapf(err, "failed to record task event, as it already exists in terminal state. Event state: %s", ev.Phase) + logger.Warningf(ctx, "Failed to record taskEvent in state: %s, error: %s", ev.GetPhase(), err) + return errors.Wrapf(err, "failed to record task event, as it already exists in terminal state. 
Event state: %s", ev.GetPhase()) } return err } @@ -60,30 +60,30 @@ func (e eventRecorder) RecordNodeEvent(ctx context.Context, nodeEvent *event.Nod return fmt.Errorf("event recording attempt of Nil Node execution event") } - if nodeEvent.Id == nil { + if nodeEvent.GetId() == nil { return fmt.Errorf("event recording attempt of with nil node Event ID") } - logger.Infof(ctx, "Recording NodeEvent [%s] phase[%s]", nodeEvent.GetId().String(), nodeEvent.Phase.String()) + logger.Infof(ctx, "Recording NodeEvent [%s] phase[%s]", nodeEvent.GetId().String(), nodeEvent.GetPhase().String()) err := e.nodeEventRecorder.RecordNodeEvent(ctx, nodeEvent, eventConfig) if err != nil { - if nodeEvent.GetId().NodeId == v1alpha1.EndNodeID { + if nodeEvent.GetId().GetNodeId() == v1alpha1.EndNodeID { return nil } if eventsErr.IsAlreadyExists(err) { logger.Infof(ctx, "Node event phase: %s, nodeId %s already exist", - nodeEvent.Phase.String(), nodeEvent.GetId().NodeId) + nodeEvent.GetPhase().String(), nodeEvent.GetId().GetNodeId()) return nil } else if eventsErr.IsEventAlreadyInTerminalStateError(err) { - if IsTerminalNodePhase(nodeEvent.Phase) { + if IsTerminalNodePhase(nodeEvent.GetPhase()) { // Event was trying to record a different terminal phase for an already terminal event. ignoring. logger.Infof(ctx, "Node event phase: %s, nodeId %s already in terminal phase. err: %s", - nodeEvent.Phase.String(), nodeEvent.GetId().NodeId, err.Error()) + nodeEvent.GetPhase().String(), nodeEvent.GetId().GetNodeId(), err.Error()) return nil } logger.Warningf(ctx, "Failed to record nodeEvent, error [%s]", err.Error()) - return nodeerrors.Wrapf(nodeerrors.IllegalStateError, nodeEvent.Id.NodeId, err, "phase mismatch mismatch between propeller and control plane; Trying to record Node p: %s", nodeEvent.Phase) + return nodeerrors.Wrapf(nodeerrors.IllegalStateError, nodeEvent.GetId().GetNodeId(), err, "phase mismatch mismatch between propeller and control plane; Trying to record Node p: %s", nodeEvent.GetPhase()) } } return err @@ -223,7 +223,7 @@ func newNodeExecContext(_ context.Context, store *storage.DataStore, execContext } nodeLabels[NodeIDLabel] = utils.SanitizeLabelValue(node.GetID()) if tr != nil && tr.GetTaskID() != nil { - nodeLabels[TaskNameLabel] = utils.SanitizeLabelValue(tr.GetTaskID().Name) + nodeLabels[TaskNameLabel] = utils.SanitizeLabelValue(tr.GetTaskID().GetName()) } nodeLabels[NodeInterruptibleLabel] = strconv.FormatBool(interruptible) md.nodeLabels = nodeLabels @@ -290,9 +290,9 @@ func (c *nodeExecutor) BuildNodeExecutionContext(ctx context.Context, executionC if config.GetConfig().NodeConfig.IgnoreRetryCause { // For the unified retry behavior we execute the last interruptibleFailureThreshold attempts on a non // interruptible machine - maxAttempts := uint32(config.GetConfig().NodeConfig.DefaultMaxAttempts) + maxAttempts := uint32(config.GetConfig().NodeConfig.DefaultMaxAttempts) // #nosec G115 if n.GetRetryStrategy() != nil && n.GetRetryStrategy().MinAttempts != nil && *n.GetRetryStrategy().MinAttempts != 1 { - maxAttempts = uint32(*n.GetRetryStrategy().MinAttempts) + maxAttempts = uint32(*n.GetRetryStrategy().MinAttempts) // #nosec G115 } // For interruptible nodes run at least one attempt on an interruptible machine (thus s.GetAttempts() > 0) even if there won't be any retries diff --git a/flytepropeller/pkg/controller/nodes/node_exec_context_test.go b/flytepropeller/pkg/controller/nodes/node_exec_context_test.go index 4614d0f035..2f421a7a7b 100644 --- 
a/flytepropeller/pkg/controller/nodes/node_exec_context_test.go +++ b/flytepropeller/pkg/controller/nodes/node_exec_context_test.go @@ -147,10 +147,10 @@ func Test_NodeContextDefault(t *testing.T) { // Test that retrieving task nodes taskIdentifier := common.GetTargetEntity(ctx, nodeExecContext) - assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.Id.Project, taskIdentifier.Project) - assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.Id.Domain, taskIdentifier.Domain) - assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.Id.Name, taskIdentifier.Name) - assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.Id.Version, taskIdentifier.Version) + assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.GetId().GetProject(), taskIdentifier.GetProject()) + assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.GetId().GetDomain(), taskIdentifier.GetDomain()) + assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.GetId().GetName(), taskIdentifier.GetName()) + assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.GetId().GetVersion(), taskIdentifier.GetVersion()) } func TestGetTargetEntity_LaunchPlanNode(t *testing.T) { @@ -173,10 +173,10 @@ func TestGetTargetEntity_LaunchPlanNode(t *testing.T) { nCtx.OnNode().Return(n) fetchedID := common.GetTargetEntity(context.Background(), nCtx) - assert.Equal(t, id.Project, fetchedID.Project) - assert.Equal(t, id.Domain, fetchedID.Domain) - assert.Equal(t, id.Name, fetchedID.Name) - assert.Equal(t, id.Version, fetchedID.Version) + assert.Equal(t, id.GetProject(), fetchedID.GetProject()) + assert.Equal(t, id.GetDomain(), fetchedID.GetDomain()) + assert.Equal(t, id.GetName(), fetchedID.GetName()) + assert.Equal(t, id.GetVersion(), fetchedID.GetVersion()) } func TestGetTargetEntity_EmptyTask(t *testing.T) { diff --git a/flytepropeller/pkg/controller/nodes/node_state_manager.go b/flytepropeller/pkg/controller/nodes/node_state_manager.go index a9ead9afc3..25b0bc55df 100644 --- a/flytepropeller/pkg/controller/nodes/node_state_manager.go +++ b/flytepropeller/pkg/controller/nodes/node_state_manager.go @@ -80,7 +80,7 @@ func (n nodeStateManager) GetTaskNodeState() handler.TaskNodeState { tn := n.nodeStatus.GetTaskNodeStatus() if tn != nil { return handler.TaskNodeState{ - PluginPhase: pluginCore.Phase(tn.GetPhase()), + PluginPhase: pluginCore.Phase(tn.GetPhase()), // #nosec G115 PluginPhaseVersion: tn.GetPhaseVersion(), PluginStateVersion: tn.GetPluginStateVersion(), PluginState: tn.GetPluginState(), @@ -181,6 +181,11 @@ func (n nodeStateManager) GetArrayNodeState() handler.ArrayNodeState { if subNodeSystemFailuresCopy := subNodeSystemFailures.DeepCopy(); subNodeSystemFailuresCopy != nil { as.SubNodeSystemFailures = *subNodeSystemFailuresCopy } + + subNodeDeltaTimestamps := an.GetSubNodeDeltaTimestamps() + if subNodeDeltaTimestampsCopy := subNodeDeltaTimestamps.DeepCopy(); subNodeDeltaTimestampsCopy != nil { + as.SubNodeDeltaTimestamps = *subNodeDeltaTimestampsCopy + } } return as } diff --git a/flytepropeller/pkg/controller/nodes/output_resolver.go b/flytepropeller/pkg/controller/nodes/output_resolver.go index df8a6dfe19..620064d2ac 100644 --- a/flytepropeller/pkg/controller/nodes/output_resolver.go +++ b/flytepropeller/pkg/controller/nodes/output_resolver.go @@ -86,7 +86,7 @@ func resolveSubtaskOutput(ctx context.Context, store storage.ProtobufStore, node "Outputs not found at [%v]", outputsFileRef) } - l, ok := d.Literals[varName] + l, ok := d.GetLiterals()[varName] if !ok { return nil, errors.Errorf(errors.BadSpecificationError, nodeID, "Output of array tasks is expected to be "+ "a single literal map 
entry named 'array' of type LiteralCollection.") @@ -97,7 +97,7 @@ func resolveSubtaskOutput(ctx context.Context, store storage.ProtobufStore, node "is of type [%v]. LiteralCollection is expected.", reflect.TypeOf(l.GetValue())) } - literals := l.GetCollection().Literals + literals := l.GetCollection().GetLiterals() if idx >= len(literals) { return nil, errors.Errorf(errors.OutputsNotFoundError, nodeID, "Failed to find [%v[%v].%v]", nodeID, idx, varName) @@ -120,7 +120,7 @@ func resolveSingleOutput(ctx context.Context, store storage.ProtobufStore, nodeI "Outputs not found at [%v]", outputsFileRef) } - l, ok := d.Literals[varName] + l, ok := d.GetLiterals()[varName] if !ok { return nil, errors.Errorf(errors.OutputsNotFoundError, nodeID, "Failed to find [%v].[%v]", nodeID, varName) diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go index ea21ce1171..f819a47f0b 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go @@ -174,11 +174,11 @@ func TestWorkflowNodeHandler_StartNode_Launchplan(t *testing.T) { mockLPExec.OnLaunchMatch( ctx, mock.MatchedBy(func(o launchplan.LaunchContext) bool { - return o.ParentNodeExecution.NodeId == mockNode.GetID() && - o.ParentNodeExecution.ExecutionId == wfExecID + return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() && + o.ParentNodeExecution.GetExecutionId() == wfExecID }), mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), @@ -200,11 +200,11 @@ func TestWorkflowNodeHandler_StartNode_Launchplan(t *testing.T) { mockLPExec.OnLaunchMatch( ctx, mock.MatchedBy(func(o launchplan.LaunchContext) bool { - return o.ParentNodeExecution.NodeId == mockNode.GetID() && - o.ParentNodeExecution.ExecutionId == wfExecID + return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() && + o.ParentNodeExecution.GetExecutionId() == wfExecID }), mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), @@ -256,7 +256,7 @@ func TestWorkflowNodeHandler_CheckNodeStatus(t *testing.T) { mockLPExec.OnGetStatusMatch( ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_RUNNING, @@ -277,7 +277,7 @@ func TestWorkflowNodeHandler_CheckNodeStatus(t *testing.T) { mockLPExec.OnGetStatusMatch( ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return 
assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_RUNNING, @@ -329,7 +329,7 @@ func TestWorkflowNodeHandler_AbortNode(t *testing.T) { mockLPExec.OnKillMatch( ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.AnythingOfType(reflect.String.String()), ).Return(nil) @@ -351,7 +351,7 @@ func TestWorkflowNodeHandler_AbortNode(t *testing.T) { mockLPExec.OnKillMatch( ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.AnythingOfType(reflect.String.String()), ).Return(nil) @@ -371,7 +371,7 @@ func TestWorkflowNodeHandler_AbortNode(t *testing.T) { mockLPExec.OnKillMatch( ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.AnythingOfType(reflect.String.String()), ).Return(expectedErr) diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go index 60802a6486..16d0134740 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go @@ -28,17 +28,17 @@ type launchPlanHandler struct { func getParentNodeExecutionID(nCtx interfaces.NodeExecutionContext) (*core.NodeExecutionIdentifier, error) { nodeExecID := &core.NodeExecutionIdentifier{ - ExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().ExecutionId, + ExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId(), } if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 { var err error - currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId) + currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId()) if err != nil { return nil, err } nodeExecID.NodeId = currentNodeUniqueID } else { - nodeExecID.NodeId = nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId + nodeExecID.NodeId = nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId() } return nodeExecID, nil } @@ -77,11 +77,11 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces } if nCtx.ExecutionContext().GetExecutionConfig().RecoveryExecution.WorkflowExecutionIdentifier != nil { - fullyQualifiedNodeID := nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId + fullyQualifiedNodeID := nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId() if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 { // compute fully qualified node id (prefixed with parent id and retry attempt) to ensure uniqueness var err error - fullyQualifiedNodeID, err = 
common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId) + fullyQualifiedNodeID, err = common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId()) if err != nil { return handler.UnknownTransition, err } @@ -94,11 +94,11 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces logger.Warnf(ctx, "Failed to recover workflow node [%+v] with err [%+v]", nCtx.NodeExecutionMetadata().GetNodeExecutionID(), err) } } - if recovered != nil && recovered.Closure != nil && recovered.Closure.Phase == core.NodeExecution_SUCCEEDED { - if recovered.Closure.GetWorkflowNodeMetadata() != nil { - launchCtx.RecoveryExecution = recovered.Closure.GetWorkflowNodeMetadata().ExecutionId + if recovered != nil && recovered.GetClosure() != nil && recovered.GetClosure().GetPhase() == core.NodeExecution_SUCCEEDED { + if recovered.GetClosure().GetWorkflowNodeMetadata() != nil { + launchCtx.RecoveryExecution = recovered.GetClosure().GetWorkflowNodeMetadata().GetExecutionId() } else { - logger.Debugf(ctx, "Attempted to recovered workflow node execution [%+v] but was missing workflow node metadata", recovered.Id) + logger.Debugf(ctx, "Attempted to recovered workflow node execution [%+v] but was missing workflow node metadata", recovered.GetId()) } } } @@ -106,7 +106,7 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces nodeInputs, nCtx.NodeExecutionMetadata().GetOwnerID().String()) if err != nil { if launchplan.IsAlreadyExists(err) { - logger.Infof(ctx, "Execution already exists [%s].", childID.Name) + logger.Infof(ctx, "Execution already exists [%s].", childID.GetName()) } else if launchplan.IsUserError(err) { return handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(core.ExecutionError_USER, errors.RuntimeExecutionError, err.Error(), nil)), nil } else { @@ -114,7 +114,7 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces } } else { eCtx := nCtx.ExecutionContext() - logger.Infof(ctx, "Launched launchplan with ID [%s], Parallelism is now set to [%d]", childID.Name, eCtx.IncrementParallelism()) + logger.Infof(ctx, "Launched launchplan with ID [%s], Parallelism is now set to [%d]", childID.GetName(), eCtx.IncrementParallelism()) } return handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{ @@ -180,7 +180,7 @@ func (l *launchPlanHandler) CheckLaunchPlanStatus(ctx context.Context, nCtx inte switch wfStatusClosure.GetPhase() { case core.WorkflowExecution_ABORTED: wErr = fmt.Errorf("launchplan execution aborted") - err = errors.Wrapf(errors.RemoteChildWorkflowExecutionFailed, nCtx.NodeID(), wErr, "launchplan [%s] aborted", childID.Name) + err = errors.Wrapf(errors.RemoteChildWorkflowExecutionFailed, nCtx.NodeID(), wErr, "launchplan [%s] aborted", childID.GetName()) return handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(core.ExecutionError_USER, errors.RemoteChildWorkflowExecutionFailed, err.Error(), &handler.ExecutionInfo{ WorkflowNodeInfo: &handler.WorkflowNodeInfo{LaunchedWorkflowID: childID}, })), nil diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go index 1ce0568bf6..91709b411d 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go +++ 
b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go @@ -51,7 +51,7 @@ func (e executionCacheItem) IsTerminal() bool { if e.ExecutionClosure == nil { return false } - return e.ExecutionClosure.Phase == core.WorkflowExecution_ABORTED || e.ExecutionClosure.Phase == core.WorkflowExecution_FAILED || e.ExecutionClosure.Phase == core.WorkflowExecution_SUCCEEDED + return e.ExecutionClosure.GetPhase() == core.WorkflowExecution_ABORTED || e.ExecutionClosure.GetPhase() == core.WorkflowExecution_FAILED || e.ExecutionClosure.GetPhase() == core.WorkflowExecution_SUCCEEDED } func (e executionCacheItem) ID() string { @@ -63,7 +63,7 @@ func (a *adminLaunchPlanExecutor) handleLaunchError(ctx context.Context, isRecov statusCode := status.Code(err) if isRecovery && statusCode == codes.NotFound { - logger.Warnf(ctx, "failed to recover workflow [%s] with err %+v. will attempt to launch instead", launchPlanRef.Name, err) + logger.Warnf(ctx, "failed to recover workflow [%s] with err %+v. will attempt to launch instead", launchPlanRef.GetName(), err) return nil } switch statusCode { @@ -73,9 +73,9 @@ func (a *adminLaunchPlanExecutor) handleLaunchError(ctx context.Context, isRecov logger.Errorf(ctx, "Failed to add ExecID [%v] to auto refresh cache", executionID) } - return stdErr.Wrapf(RemoteErrorAlreadyExists, err, "ExecID %s already exists", executionID.Name) + return stdErr.Wrapf(RemoteErrorAlreadyExists, err, "ExecID %s already exists", executionID.GetName()) case codes.DataLoss, codes.DeadlineExceeded, codes.Internal, codes.Unknown, codes.Canceled: - return stdErr.Wrapf(RemoteErrorSystem, err, "failed to launch workflow [%s], system error", launchPlanRef.Name) + return stdErr.Wrapf(RemoteErrorSystem, err, "failed to launch workflow [%s], system error", launchPlanRef.GetName()) default: return stdErr.Wrapf(RemoteErrorUser, err, "failed to launch workflow") } @@ -88,7 +88,7 @@ func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchCo if launchCtx.RecoveryExecution != nil { _, err = a.adminClient.RecoverExecution(ctx, &admin.ExecutionRecoverRequest{ Id: launchCtx.RecoveryExecution, - Name: executionID.Name, + Name: executionID.GetName(), Metadata: &admin.ExecutionMetadata{ ParentNodeExecution: launchCtx.ParentNodeExecution, }, @@ -128,9 +128,9 @@ func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchCo } req := &admin.ExecutionCreateRequest{ - Project: executionID.Project, - Domain: executionID.Domain, - Name: executionID.Name, + Project: executionID.GetProject(), + Domain: executionID.GetDomain(), + Name: executionID.GetName(), Inputs: inputs, Spec: &admin.ExecutionSpec{ LaunchPlan: launchPlanRef, @@ -143,7 +143,7 @@ func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchCo Labels: &admin.Labels{Values: labels}, Annotations: &admin.Annotations{Values: launchCtx.Annotations}, SecurityContext: &launchCtx.SecurityContext, - MaxParallelism: int32(launchCtx.MaxParallelism), + MaxParallelism: int32(launchCtx.MaxParallelism), // #nosec G115 RawOutputDataConfig: launchCtx.RawOutputDataConfig, Interruptible: interruptible, OverwriteCache: launchCtx.OverwriteCache, @@ -235,8 +235,8 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc // Is workflow already terminated, then no need to fetch information, also the item can be dropped from the cache if exec.ExecutionClosure != nil { - if IsWorkflowTerminated(exec.ExecutionClosure.Phase) { - logger.Debugf(ctx, "Workflow [%s] is already completed, will not 
fetch execution information", exec.ExecutionClosure.WorkflowId) + if IsWorkflowTerminated(exec.ExecutionClosure.GetPhase()) { + logger.Debugf(ctx, "Workflow [%s] is already completed, will not fetch execution information", exec.ExecutionClosure.GetWorkflowId()) resp = append(resp, cache.ItemSyncResponse{ ID: obj.GetID(), Item: exec, @@ -256,7 +256,7 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc // TODO: Define which error codes are system errors (and return the error) vs user stdErr. if status.Code(err) == codes.NotFound { - err = stdErr.Wrapf(RemoteErrorNotFound, err, "execID [%s] not found on remote", exec.WorkflowExecutionIdentifier.Name) + err = stdErr.Wrapf(RemoteErrorNotFound, err, "execID [%s] not found on remote", exec.WorkflowExecutionIdentifier.GetName()) } else { err = stdErr.Wrapf(RemoteErrorSystem, err, "system error") } @@ -315,7 +315,7 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc ID: obj.GetID(), Item: executionCacheItem{ WorkflowExecutionIdentifier: exec.WorkflowExecutionIdentifier, - ExecutionClosure: res.Closure, + ExecutionClosure: res.GetClosure(), ExecutionOutputs: outputs, ParentWorkflowID: exec.ParentWorkflowID, }, @@ -327,7 +327,7 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc // prematurely, there is a chance the parent workflow evaluates before the cache is updated. for _, itemSyncResponse := range resp { exec := itemSyncResponse.Item.(executionCacheItem) - if exec.ExecutionClosure != nil && IsWorkflowTerminated(exec.ExecutionClosure.Phase) { + if exec.ExecutionClosure != nil && IsWorkflowTerminated(exec.ExecutionClosure.GetPhase()) { a.enqueueWorkflow(exec.ParentWorkflowID) } } @@ -344,7 +344,8 @@ func NewAdminLaunchPlanExecutor(_ context.Context, client service.AdminServiceCl } rateLimiter := &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(cfg.TPS), cfg.Burst)} - c, err := cache.NewAutoRefreshCache("admin-launcher", exec.syncItem, rateLimiter, cfg.CacheResyncDuration.Duration, cfg.Workers, cfg.MaxCacheSize, scope) + // #nosec G115 + c, err := cache.NewAutoRefreshCache("admin-launcher", exec.syncItem, rateLimiter, cfg.CacheResyncDuration.Duration, uint(cfg.Workers), uint(cfg.MaxCacheSize), scope) if err != nil { return nil, err } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go index ead1312e17..cf60cc85d8 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go @@ -61,7 +61,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { mockClient.On("CreateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool { - return o.Project == "p" && o.Domain == "d" && o.Name == "n" && o.Spec.Inputs == nil + return o.GetProject() == "p" && o.GetDomain() == "d" && o.GetName() == "n" && o.GetSpec().GetInputs() == nil }), ).Return(nil, nil) @@ -108,7 +108,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { mockClient.On("CreateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool { - return o.Project == "p" && o.Domain == "d" && o.Name == "n" && o.Spec.Inputs == nil + return o.GetProject() == "p" && o.GetDomain() == "d" && o.GetName() == "n" && o.GetSpec().GetInputs() == nil }), ).Return(nil, nil) @@ -170,9 +170,9 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { 
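For illustration outside the diff: the test assertions in this area compare protobuf messages with proto.Equal (and getter-based field checks inside mock.MatchedBy) rather than plain struct equality. Generated messages carry internal bookkeeping (size caches, unknown fields), so reflect-based equality can mis-report two semantically equal messages; proto.Equal compares field values. A small runnable sketch using a well-known type from google.golang.org/protobuf:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("np1")
	b := wrapperspb.String("np1")

	// Semantic, field-wise comparison of two messages.
	fmt.Println(proto.Equal(a, b)) // true

	// Comparing the dereferenced structs directly (what a plain equality
	// assertion does) is not defined for generated messages and can diverge
	// once internal bookkeeping fields differ, e.g. after marshalling.
}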
mockClient.On("CreateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool { - return o.Project == "p" && o.Domain == "d" && o.Name == "n" && o.Spec.Inputs == nil && - o.Spec.Metadata.Mode == admin.ExecutionMetadata_CHILD_WORKFLOW && - reflect.DeepEqual(o.Spec.Labels.Values, map[string]string{"foo": "bar"}) // Ensure shard-key was removed. + return o.GetProject() == "p" && o.GetDomain() == "d" && o.GetName() == "n" && o.GetSpec().GetInputs() == nil && + o.GetSpec().GetMetadata().GetMode() == admin.ExecutionMetadata_CHILD_WORKFLOW && + reflect.DeepEqual(o.GetSpec().GetLabels().GetValues(), map[string]string{"foo": "bar"}) // Ensure shard-key was removed. }), ).Return(nil, nil) assert.NoError(t, err) @@ -216,8 +216,8 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { mockClient.On("RecoverExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionRecoverRequest) bool { - return o.Id.Project == "p" && o.Id.Domain == "d" && o.Id.Name == "w" && o.Name == "n" && - proto.Equal(o.Metadata.ParentNodeExecution, parentNodeExecution) + return o.GetId().GetProject() == "p" && o.GetId().GetDomain() == "d" && o.GetId().GetName() == "w" && o.GetName() == "n" && + proto.Equal(o.GetMetadata().GetParentNodeExecution(), parentNodeExecution) }), ).Return(nil, nil) assert.NoError(t, err) @@ -256,8 +256,8 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { mockClient.On("RecoverExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionRecoverRequest) bool { - return o.Id.Project == "p" && o.Id.Domain == "d" && o.Id.Name == "w" && o.Name == "n" && - proto.Equal(o.Metadata.ParentNodeExecution, parentNodeExecution) + return o.GetId().GetProject() == "p" && o.GetId().GetDomain() == "d" && o.GetId().GetName() == "w" && o.GetName() == "n" && + proto.Equal(o.GetMetadata().GetParentNodeExecution(), parentNodeExecution) }), ).Return(nil, recoveryErr) @@ -266,8 +266,8 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { ctx, mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool { createCalled = true - return o.Project == "p" && o.Domain == "d" && o.Name == "n" && o.Spec.Inputs == nil && - o.Spec.Metadata.Mode == admin.ExecutionMetadata_CHILD_WORKFLOW + return o.GetProject() == "p" && o.GetDomain() == "d" && o.GetName() == "n" && o.GetSpec().GetInputs() == nil && + o.GetSpec().GetMetadata().GetMode() == admin.ExecutionMetadata_CHILD_WORKFLOW }), ).Return(nil, nil) @@ -367,7 +367,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) { exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("TerminateExecution", ctx, - mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && o.Cause == reason }), + mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.GetId() == id && o.GetCause() == reason }), ).Return(&admin.ExecutionTerminateResponse{}, nil) assert.NoError(t, err) err = exec.Kill(ctx, id, reason) @@ -380,7 +380,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) { exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("TerminateExecution", ctx, - mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && o.Cause == reason }), + mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.GetId() == id && o.GetCause() == reason }), ).Return(nil, status.Error(codes.NotFound, "")) assert.NoError(t, err) err = exec.Kill(ctx, id, reason) @@ 
-393,7 +393,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) { exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("TerminateExecution", ctx, - mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && o.Cause == reason }), + mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.GetId() == id && o.GetCause() == reason }), ).Return(nil, status.Error(codes.Canceled, "")) assert.NoError(t, err) err = exec.Kill(ctx, id, reason) @@ -426,7 +426,7 @@ func TestNewAdminLaunchPlanExecutor_GetLaunchPlan(t *testing.T) { ).Return(&admin.LaunchPlan{Id: id}, nil) lp, err := exec.GetLaunchPlan(ctx, id) assert.NoError(t, err) - assert.Equal(t, lp.Id, id) + assert.Equal(t, lp.GetId(), id) }) t.Run("launch plan not found", func(t *testing.T) { diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go index 3f7444788d..4c5873cc4d 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go @@ -19,12 +19,12 @@ type failFastWorkflowLauncher struct { func (failFastWorkflowLauncher) Launch(ctx context.Context, launchCtx LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap, parentWorkflowID v1alpha1.WorkflowID) error { - logger.Infof(ctx, "Fail: Launch Workflow requested with ExecID [%s], LaunchPlan [%s]", executionID.Name, fmt.Sprintf("%s:%s:%s", launchPlanRef.Project, launchPlanRef.Domain, launchPlanRef.Name)) + logger.Infof(ctx, "Fail: Launch Workflow requested with ExecID [%s], LaunchPlan [%s]", executionID.GetName(), fmt.Sprintf("%s:%s:%s", launchPlanRef.GetProject(), launchPlanRef.GetDomain(), launchPlanRef.GetName())) return errors.Wrapf(RemoteErrorUser, fmt.Errorf("badly configured system"), "please enable admin workflow launch to use launchplans") } func (failFastWorkflowLauncher) GetStatus(ctx context.Context, executionID *core.WorkflowExecutionIdentifier) (*admin.ExecutionClosure, *core.LiteralMap, error) { - logger.Infof(ctx, "NOOP: Workflow Status ExecID [%s]", executionID.Name) + logger.Infof(ctx, "NOOP: Workflow Status ExecID [%s]", executionID.GetName()) return nil, nil, errors.Wrapf(RemoteErrorUser, fmt.Errorf("badly configured system"), "please enable admin workflow launch to use launchplans") } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go index 68b5383b78..62445d5efa 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go @@ -77,11 +77,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { mockLPExec.On("Launch", ctx, mock.MatchedBy(func(o launchplan.LaunchContext) bool { - return o.ParentNodeExecution.NodeId == mockNode.GetID() && - o.ParentNodeExecution.ExecutionId == wfExecID + return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() && + o.ParentNodeExecution.GetExecutionId() == wfExecID }), mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.MatchedBy(func(o 
*core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), @@ -107,11 +107,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { mockLPExec.On("Launch", ctx, mock.MatchedBy(func(o launchplan.LaunchContext) bool { - return o.ParentNodeExecution.NodeId == mockNode.GetID() && - o.ParentNodeExecution.ExecutionId == wfExecID + return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() && + o.ParentNodeExecution.GetExecutionId() == wfExecID }), mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), @@ -134,11 +134,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { mockLPExec.On("Launch", ctx, mock.MatchedBy(func(o launchplan.LaunchContext) bool { - return o.ParentNodeExecution.NodeId == mockNode.GetID() && - o.ParentNodeExecution.ExecutionId == wfExecID + return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() && + o.ParentNodeExecution.GetExecutionId() == wfExecID }), mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), @@ -161,11 +161,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { mockLPExec.On("Launch", ctx, mock.MatchedBy(func(o launchplan.LaunchContext) bool { - return o.ParentNodeExecution.NodeId == mockNode.GetID() && - o.ParentNodeExecution.ExecutionId == wfExecID + return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() && + o.ParentNodeExecution.GetExecutionId() == wfExecID }), mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), @@ -216,11 +216,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { mockLPExec.On("Launch", ctx, mock.MatchedBy(func(o launchplan.LaunchContext) bool { - return o.ParentNodeExecution.NodeId == mockNode.GetID() && - o.ParentNodeExecution.ExecutionId == wfExecID + return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() && + o.ParentNodeExecution.GetExecutionId() == wfExecID }), mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), @@ -317,7 +317,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", 
ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_RUNNING, @@ -340,7 +340,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_SUCCEEDED, @@ -375,7 +375,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_SUCCEEDED, @@ -417,7 +417,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_SUCCEEDED, @@ -454,7 +454,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_FAILED, @@ -484,7 +484,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_FAILED, @@ -508,7 +508,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_ABORTED, @@ -532,7 +532,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, 
wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(nil, &core.LiteralMap{}, errors.Wrapf(launchplan.RemoteErrorNotFound, fmt.Errorf("some error"), "not found")) @@ -554,7 +554,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(nil, &core.LiteralMap{}, errors.Wrapf(launchplan.RemoteErrorSystem, fmt.Errorf("some error"), "not found")) @@ -582,7 +582,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_SUCCEEDED, @@ -616,7 +616,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) { mockLPExec.On("GetStatus", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), ).Return(&admin.ExecutionClosure{ Phase: core.WorkflowExecution_SUCCEEDED, @@ -670,7 +670,7 @@ func TestLaunchPlanHandler_HandleAbort(t *testing.T) { mockLPExec.On("Kill", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.AnythingOfType(reflect.String.String()), ).Return(nil) @@ -692,7 +692,7 @@ func TestLaunchPlanHandler_HandleAbort(t *testing.T) { mockLPExec.On("Kill", ctx, mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool { - return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain) + return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain()) }), mock.AnythingOfType(reflect.String.String()), ).Return(expectedErr) diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/util.go b/flytepropeller/pkg/controller/nodes/subworkflow/util.go index ae23439c97..8c119a2175 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/util.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/util.go @@ -10,21 +10,21 @@ import ( const maxLengthForSubWorkflow = 20 func GetChildWorkflowExecutionID(nodeExecID *core.NodeExecutionIdentifier, attempt uint32) (*core.WorkflowExecutionIdentifier, error) { - name, err := encoding.FixedLengthUniqueIDForParts(maxLengthForSubWorkflow, []string{nodeExecID.ExecutionId.Name, nodeExecID.NodeId, strconv.Itoa(int(attempt))}) + name, err := encoding.FixedLengthUniqueIDForParts(maxLengthForSubWorkflow, []string{nodeExecID.GetExecutionId().GetName(), nodeExecID.GetNodeId(), strconv.Itoa(int(attempt))}) if err != nil { return nil, err } // Restriction on name is 20 chars return &core.WorkflowExecutionIdentifier{ - 
Project: nodeExecID.ExecutionId.Project, - Domain: nodeExecID.ExecutionId.Domain, + Project: nodeExecID.GetExecutionId().GetProject(), + Domain: nodeExecID.GetExecutionId().GetDomain(), Name: name, }, nil } func GetChildWorkflowExecutionIDV2(nodeExecID *core.NodeExecutionIdentifier, attempt uint32) (*core.WorkflowExecutionIdentifier, error) { - name, err := encoding.FixedLengthUniqueIDForParts(maxLengthForSubWorkflow, []string{nodeExecID.ExecutionId.Name, nodeExecID.NodeId, strconv.Itoa(int(attempt))}, + name, err := encoding.FixedLengthUniqueIDForParts(maxLengthForSubWorkflow, []string{nodeExecID.GetExecutionId().GetName(), nodeExecID.GetNodeId(), strconv.Itoa(int(attempt))}, encoding.NewAlgorithmOption(encoding.Algorithm64)) if err != nil { return nil, err @@ -32,8 +32,8 @@ func GetChildWorkflowExecutionIDV2(nodeExecID *core.NodeExecutionIdentifier, att // Restriction on name is 20 chars return &core.WorkflowExecutionIdentifier{ - Project: nodeExecID.ExecutionId.Project, - Domain: nodeExecID.ExecutionId.Domain, + Project: nodeExecID.GetExecutionId().GetProject(), + Domain: nodeExecID.GetExecutionId().GetDomain(), Name: name, }, nil } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/util_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/util_test.go index f1df02deb6..96b93b8f8a 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/util_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/util_test.go @@ -19,6 +19,6 @@ func TestGetChildWorkflowExecutionID(t *testing.T) { }, }, 1) - assert.Equal(t, id.Name, "fav2uxxi") + assert.Equal(t, id.GetName(), "fav2uxxi") assert.NoError(t, err) } diff --git a/flytepropeller/pkg/controller/nodes/task/cache.go b/flytepropeller/pkg/controller/nodes/task/cache.go index fab3cd4d61..d408a5af85 100644 --- a/flytepropeller/pkg/controller/nodes/task/cache.go +++ b/flytepropeller/pkg/controller/nodes/task/cache.go @@ -26,10 +26,10 @@ func (t *Handler) GetCatalogKey(ctx context.Context, nCtx interfaces.NodeExecuti } return catalog.Key{ - Identifier: *taskTemplate.Id, - CacheVersion: taskTemplate.Metadata.DiscoveryVersion, - CacheIgnoreInputVars: taskTemplate.Metadata.CacheIgnoreInputVars, - TypedInterface: *taskTemplate.Interface, + Identifier: *taskTemplate.Id, //nolint:protogetter + CacheVersion: taskTemplate.GetMetadata().GetDiscoveryVersion(), + CacheIgnoreInputVars: taskTemplate.GetMetadata().GetCacheIgnoreInputVars(), + TypedInterface: *taskTemplate.GetInterface(), InputReader: nCtx.InputReader(), }, nil } @@ -62,5 +62,5 @@ func (t *Handler) IsCacheable(ctx context.Context, nCtx interfaces.NodeExecution return false, false, err } - return taskTemplate.Metadata.Discoverable, taskTemplate.Metadata.Discoverable && taskTemplate.Metadata.CacheSerializable, nil + return taskTemplate.GetMetadata().GetDiscoverable(), taskTemplate.GetMetadata().GetDiscoverable() && taskTemplate.GetMetadata().GetCacheSerializable(), nil } diff --git a/flytepropeller/pkg/controller/nodes/task/handler.go b/flytepropeller/pkg/controller/nodes/task/handler.go index 2adea27312..000d6bd7e7 100644 --- a/flytepropeller/pkg/controller/nodes/task/handler.go +++ b/flytepropeller/pkg/controller/nodes/task/handler.go @@ -434,6 +434,7 @@ func (t Handler) invokePlugin(ctx context.Context, p pluginCore.Plugin, tCtx *ta pluginTrns.TransitionPreviouslyRecorded() return pluginTrns, nil } + // #nosec G115 if pluginTrns.pInfo.Version() > uint32(t.cfg.MaxPluginPhaseVersions) { logger.Errorf(ctx, "Too many Plugin p versions for plugin [%s]. 
p versions [%d/%d]", p.GetID(), pluginTrns.pInfo.Version(), t.cfg.MaxPluginPhaseVersions) pluginTrns.ObservedExecutionError(&io.ExecutionError{ @@ -565,7 +566,7 @@ func (t Handler) Handle(ctx context.Context, nCtx interfaces.NodeExecutionContex logger.Errorf(ctx, "failed to read TaskTemplate, error :%s", err.Error()) return handler.UnknownTransition, err } - if tk.Interface != nil && tk.Interface.Inputs != nil && len(tk.Interface.Inputs.Variables) > 0 { + if tk.GetInterface() != nil && tk.GetInterface().GetInputs() != nil && len(tk.GetInterface().GetInputs().GetVariables()) > 0 { inputs, err = nCtx.InputReader().Get(ctx) if err != nil { logger.Errorf(ctx, "failed to read inputs when checking catalog cache %w", err) @@ -577,7 +578,7 @@ func (t Handler) Handle(ctx context.Context, nCtx interfaces.NodeExecutionContex occurredAt := time.Now() // STEP 2: If no cache-hit and not transitioning to PhaseWaitingForCache, then lets invoke the plugin and wait for a transition out of undefined if pluginTrns.execInfo.TaskNodeInfo == nil || (pluginTrns.pInfo.Phase() != pluginCore.PhaseWaitingForCache && - pluginTrns.execInfo.TaskNodeInfo.TaskNodeMetadata.CacheStatus != core.CatalogCacheStatus_CACHE_HIT) { + pluginTrns.execInfo.TaskNodeInfo.TaskNodeMetadata.GetCacheStatus() != core.CatalogCacheStatus_CACHE_HIT) { var err error pluginTrns, err = t.invokePlugin(ctx, p, tCtx, ts) @@ -624,7 +625,7 @@ func (t Handler) Handle(ctx context.Context, nCtx interfaces.NodeExecutionContex return handler.UnknownTransition, err } if err := nCtx.EventsRecorder().RecordTaskEvent(ctx, evInfo, t.eventConfig); err != nil { - logger.Errorf(ctx, "Event recording failed for Plugin [%s], eventPhase [%s], error :%s", p.GetID(), evInfo.Phase.String(), err.Error()) + logger.Errorf(ctx, "Event recording failed for Plugin [%s], eventPhase [%s], error :%s", p.GetID(), evInfo.GetPhase().String(), err.Error()) // Check for idempotency // Check for terminate state error return handler.UnknownTransition, err @@ -694,8 +695,8 @@ func (t *Handler) ValidateOutput(ctx context.Context, nodeID v1alpha1.NodeID, i return nil, err } - iface := tk.Interface - outputsDeclared := iface != nil && iface.Outputs != nil && len(iface.Outputs.Variables) > 0 + iface := tk.GetInterface() + outputsDeclared := iface != nil && iface.GetOutputs() != nil && len(iface.GetOutputs().GetVariables()) > 0 if r == nil { if outputsDeclared { @@ -838,7 +839,7 @@ func (t Handler) Abort(ctx context.Context, nCtx interfaces.NodeExecutionContext evInfo.Phase = core.TaskExecution_ABORTED } if err := evRecorder.RecordTaskEvent(ctx, evInfo, t.eventConfig); err != nil { - logger.Errorf(ctx, "Event recording failed for Plugin [%s], eventPhase [%s], error :%s", p.GetID(), evInfo.Phase.String(), err.Error()) + logger.Errorf(ctx, "Event recording failed for Plugin [%s], eventPhase [%s], error :%s", p.GetID(), evInfo.GetPhase().String(), err.Error()) // Check for idempotency // Check for terminate state error return err diff --git a/flytepropeller/pkg/controller/nodes/task/handler_test.go b/flytepropeller/pkg/controller/nodes/task/handler_test.go index 52b937cb90..62e64c02f3 100644 --- a/flytepropeller/pkg/controller/nodes/task/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/task/handler_test.go @@ -711,6 +711,7 @@ func Test_task_Handle_NoCatalog(t *testing.T) { t.Run(tt.name, func(t *testing.T) { state := &taskNodeStateHolder{} ev := &fakeBufferedEventRecorder{} + // #nosec G115 nCtx := createNodeContext(tt.args.startingPluginPhase, 
uint32(tt.args.startingPluginPhaseVersion), tt.args.expectedState, ev, "test", state, tt.want.incrParallel) c := &pluginCatalogMocks.Client{} tk := Handler{ @@ -735,11 +736,11 @@ func Test_task_Handle_NoCatalog(t *testing.T) { if tt.want.event { if assert.Equal(t, 1, len(ev.evs)) { e := ev.evs[0] - assert.Equal(t, tt.want.eventPhase.String(), e.Phase.String()) + assert.Equal(t, tt.want.eventPhase.String(), e.GetPhase().String()) if tt.args.expectedState.TaskInfo != nil { - assert.Equal(t, tt.args.expectedState.TaskInfo.Logs, e.Logs) + assert.Equal(t, tt.args.expectedState.TaskInfo.Logs, e.GetLogs()) } - if e.Phase == core.TaskExecution_RUNNING || e.Phase == core.TaskExecution_SUCCEEDED { + if e.GetPhase() == core.TaskExecution_RUNNING || e.GetPhase() == core.TaskExecution_SUCCEEDED { assert.True(t, proto.Equal(inputs, e.GetInputData())) } } @@ -761,11 +762,11 @@ func Test_task_Handle_NoCatalog(t *testing.T) { assert.Equal(t, tt.args.expectedState.PhaseVersion, state.s.PluginPhaseVersion) if tt.want.checkpoint { assert.Equal(t, "s3://sandbox/x/name-n1-1/_flytecheckpoints", - got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata.CheckpointUri) + got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata.GetCheckpointUri()) } else { assert.True(t, got.Info().GetInfo() == nil || got.Info().GetInfo().TaskNodeInfo == nil || got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata == nil || - len(got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata.CheckpointUri) == 0) + len(got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata.GetCheckpointUri()) == 0) } } }) diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/task_exec_context.go b/flytepropeller/pkg/controller/nodes/task/k8s/task_exec_context.go index bb987acbc2..3b5cd3a147 100644 --- a/flytepropeller/pkg/controller/nodes/task/k8s/task_exec_context.go +++ b/flytepropeller/pkg/controller/nodes/task/k8s/task_exec_context.go @@ -50,8 +50,8 @@ func newTaskExecutionMetadata(tCtx pluginsCore.TaskExecutionMetadata, taskTmpl * var err error secretsMap := make(map[string]string) injectLabels := make(map[string]string) - if taskTmpl.SecurityContext != nil && len(taskTmpl.SecurityContext.Secrets) > 0 { - secretsMap, err = secrets.MarshalSecretsToMapStrings(taskTmpl.SecurityContext.Secrets) + if taskTmpl.GetSecurityContext() != nil && len(taskTmpl.GetSecurityContext().GetSecrets()) > 0 { + secretsMap, err = secrets.MarshalSecretsToMapStrings(taskTmpl.GetSecurityContext().GetSecrets()) if err != nil { return TaskExecutionMetadata{}, err } @@ -59,7 +59,7 @@ func newTaskExecutionMetadata(tCtx pluginsCore.TaskExecutionMetadata, taskTmpl * injectLabels[secrets.PodLabel] = secrets.PodLabelValue } - id := tCtx.GetSecurityContext().RunAs.ExecutionIdentity + id := tCtx.GetSecurityContext().RunAs.GetExecutionIdentity() //nolint:protogetter if len(id) > 0 { sanitizedID := k8sUtils.SanitizeLabelValue(id) injectLabels[executionIdentityVariable] = sanitizedID diff --git a/flytepropeller/pkg/controller/nodes/task/plugin_state_manager.go b/flytepropeller/pkg/controller/nodes/task/plugin_state_manager.go index 3613fec916..52edbb3d70 100644 --- a/flytepropeller/pkg/controller/nodes/task/plugin_state_manager.go +++ b/flytepropeller/pkg/controller/nodes/task/plugin_state_manager.go @@ -75,7 +75,7 @@ func newPluginStateManager(_ context.Context, prevCodecVersion CodecVersion, pre return &pluginStateManager{ codec: codex.GobStateCodec{}, codecVersion: GobCodecVersion, - prevStateVersion: uint8(prevStateVersion), + prevStateVersion: uint8(prevStateVersion), // #nosec G115 prevState: prevState, 
}, nil } diff --git a/flytepropeller/pkg/controller/nodes/task/secretmanager/secrets.go b/flytepropeller/pkg/controller/nodes/task/secretmanager/secrets.go index 230017d7d3..7ffbfff9f2 100644 --- a/flytepropeller/pkg/controller/nodes/task/secretmanager/secrets.go +++ b/flytepropeller/pkg/controller/nodes/task/secretmanager/secrets.go @@ -50,18 +50,18 @@ func (f FileEnvSecretManager) Get(ctx context.Context, key string) (string, erro // Prefix+SecretGroup+_+SecretKey. If the secret is not found in environment, it'll lookup the secret from files using // the configured SecretPath / SecretGroup / SecretKey. func (f FileEnvSecretManager) GetForSecret(ctx context.Context, secret *coreIdl.Secret) (string, error) { - if len(secret.Group) == 0 || len(secret.Key) == 0 { + if len(secret.GetGroup()) == 0 || len(secret.GetKey()) == 0 { return "", fmt.Errorf("both key and group are required parameters. Secret: [%v]", secret.String()) } - envVar := fmt.Sprintf(envVarLookupFormatter, f.envPrefix, strings.ToUpper(secret.Group), strings.ToUpper(secret.Key)) + envVar := fmt.Sprintf(envVarLookupFormatter, f.envPrefix, strings.ToUpper(secret.GetGroup()), strings.ToUpper(secret.GetKey())) v, ok := os.LookupEnv(envVar) if ok { logger.Debugf(ctx, "Secret found %s", v) return v, nil } - secretFile := filepath.Join(f.secretPath, filepath.Join(secret.Group, secret.Key)) + secretFile := filepath.Join(f.secretPath, filepath.Join(secret.GetGroup(), secret.GetKey())) if _, err := os.Stat(secretFile); err != nil { if os.IsNotExist(err) { return "", fmt.Errorf("secrets not found - Env [%s], file [%s]", envVar, secretFile) diff --git a/flytepropeller/pkg/controller/nodes/task/taskexec_context.go b/flytepropeller/pkg/controller/nodes/task/taskexec_context.go index 25b936a8e4..1f29060ca9 100644 --- a/flytepropeller/pkg/controller/nodes/task/taskexec_context.go +++ b/flytepropeller/pkg/controller/nodes/task/taskexec_context.go @@ -258,12 +258,12 @@ func (t *Handler) newTaskExecutionContext(ctx context.Context, nCtx interfaces.N length = *l } - rawOutputPrefix, uniqueID, err := ComputeRawOutputPrefix(ctx, length, nCtx, currentNodeUniqueID, id.RetryAttempt) + rawOutputPrefix, uniqueID, err := ComputeRawOutputPrefix(ctx, length, nCtx, currentNodeUniqueID, id.GetRetryAttempt()) if err != nil { return nil, err } - prevCheckpointPath, err := ComputePreviousCheckpointPath(ctx, length, nCtx, currentNodeUniqueID, id.RetryAttempt) + prevCheckpointPath, err := ComputePreviousCheckpointPath(ctx, length, nCtx, currentNodeUniqueID, id.GetRetryAttempt()) if err != nil { return nil, err } @@ -280,9 +280,9 @@ func (t *Handler) newTaskExecutionContext(ctx context.Context, nCtx interfaces.N } resourceNamespacePrefix := pluginCore.ResourceNamespace(t.resourceManager.GetID()).CreateSubNamespace(pluginCore.ResourceNamespace(plugin.GetID())) - maxAttempts := uint32(controllerconfig.GetConfig().NodeConfig.DefaultMaxAttempts) + maxAttempts := uint32(controllerconfig.GetConfig().NodeConfig.DefaultMaxAttempts) // #nosec G115 if nCtx.Node().GetRetryStrategy() != nil && nCtx.Node().GetRetryStrategy().MinAttempts != nil { - maxAttempts = uint32(*nCtx.Node().GetRetryStrategy().MinAttempts) + maxAttempts = uint32(*nCtx.Node().GetRetryStrategy().MinAttempts) // #nosec G115 } taskTemplatePath, err := ioutils.GetTaskTemplatePath(ctx, nCtx.DataStore(), nCtx.NodeStatus().GetDataDir()) diff --git a/flytepropeller/pkg/controller/nodes/task/taskexec_context_test.go b/flytepropeller/pkg/controller/nodes/task/taskexec_context_test.go index e798f82a04..9a469fd25c 
100644 --- a/flytepropeller/pkg/controller/nodes/task/taskexec_context_test.go +++ b/flytepropeller/pkg/controller/nodes/task/taskexec_context_test.go @@ -167,10 +167,10 @@ func TestHandler_newTaskExecutionContext(t *testing.T) { assert.Equal(t, got.TaskExecutionMetadata().GetOverrides().GetResources(), resources) assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(), "name-n1-1") - assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().TaskId, taskID) - assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().RetryAttempt, uint32(1)) - assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().NodeExecutionId.GetNodeId(), nodeID) - assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().NodeExecutionId.GetExecutionId(), wfExecID) + assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().TaskId, taskID) //nolint:protogetter + assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().RetryAttempt, uint32(1)) //nolint:protogetter + assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().NodeExecutionId.GetNodeId(), nodeID) //nolint:protogetter + assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().NodeExecutionId.GetExecutionId(), wfExecID) //nolint:protogetter assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetUniqueNodeID(), nodeID) assert.EqualValues(t, got.ResourceManager().(resourcemanager.TaskResourceManager).GetResourcePoolInfo(), make([]*event.ResourcePoolInfo, 0)) diff --git a/flytepropeller/pkg/controller/nodes/task/transformer.go b/flytepropeller/pkg/controller/nodes/task/transformer.go index 242c1334ce..5412994732 100644 --- a/flytepropeller/pkg/controller/nodes/task/transformer.go +++ b/flytepropeller/pkg/controller/nodes/task/transformer.go @@ -57,16 +57,16 @@ func ToTaskEventPhase(p pluginCore.Phase) core.TaskExecution_Phase { func getParentNodeExecIDForTask(taskExecID *core.TaskExecutionIdentifier, execContext executors.ExecutionContext) (*core.NodeExecutionIdentifier, error) { nodeExecutionID := &core.NodeExecutionIdentifier{ - ExecutionId: taskExecID.NodeExecutionId.ExecutionId, + ExecutionId: taskExecID.GetNodeExecutionId().GetExecutionId(), } if execContext.GetEventVersion() != v1alpha1.EventVersion0 { - currentNodeUniqueID, err := common.GenerateUniqueID(execContext.GetParentInfo(), taskExecID.NodeExecutionId.NodeId) + currentNodeUniqueID, err := common.GenerateUniqueID(execContext.GetParentInfo(), taskExecID.GetNodeExecutionId().GetNodeId()) if err != nil { return nil, err } nodeExecutionID.NodeId = currentNodeUniqueID } else { - nodeExecutionID.NodeId = taskExecID.NodeExecutionId.NodeId + nodeExecutionID.NodeId = taskExecID.GetNodeExecutionId().GetNodeId() } return nodeExecutionID, nil } @@ -145,9 +145,9 @@ func ToTaskExecutionEvent(input ToTaskExecutionEventInputs) (*event.TaskExecutio }) } tev := &event.TaskExecutionEvent{ - TaskId: taskExecID.TaskId, + TaskId: taskExecID.GetTaskId(), ParentNodeExecutionId: nodeExecutionID, - RetryAttempt: taskExecID.RetryAttempt, + RetryAttempt: taskExecID.GetRetryAttempt(), Phase: ToTaskEventPhase(input.Info.Phase()), PhaseVersion: input.Info.Version(), ProducerId: input.ClusterID, diff --git a/flytepropeller/pkg/controller/nodes/task/transformer_test.go b/flytepropeller/pkg/controller/nodes/task/transformer_test.go index db89dda3e6..825b58a2ab 100644 --- a/flytepropeller/pkg/controller/nodes/task/transformer_test.go +++ 
b/flytepropeller/pkg/controller/nodes/task/transformer_test.go @@ -99,21 +99,21 @@ func TestToTaskExecutionEvent(t *testing.T) { }, }) assert.NoError(t, err) - assert.Nil(t, tev.Logs) - assert.Equal(t, core.TaskExecution_WAITING_FOR_RESOURCES, tev.Phase) - assert.Equal(t, uint32(0), tev.PhaseVersion) - assert.Equal(t, np, tev.OccurredAt) - assert.Equal(t, tkID, tev.TaskId) - assert.Equal(t, nodeID, tev.ParentNodeExecutionId) + assert.Nil(t, tev.GetLogs()) + assert.Equal(t, core.TaskExecution_WAITING_FOR_RESOURCES, tev.GetPhase()) + assert.Equal(t, uint32(0), tev.GetPhaseVersion()) + assert.Equal(t, np, tev.GetOccurredAt()) + assert.Equal(t, tkID, tev.GetTaskId()) + assert.Equal(t, nodeID, tev.GetParentNodeExecutionId()) assert.Equal(t, inputPath, tev.GetInputUri()) - assert.Nil(t, tev.OutputResult) - assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.Metadata.InstanceClass) - assert.Equal(t, containerTaskType, tev.TaskType) - assert.Equal(t, "reason", tev.Reason) - assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier) - assert.Equal(t, generatedName, tev.Metadata.GeneratedName) - assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo) - assert.Equal(t, testClusterID, tev.ProducerId) + assert.Nil(t, tev.GetOutputResult()) + assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.GetMetadata().GetInstanceClass()) + assert.Equal(t, containerTaskType, tev.GetTaskType()) + assert.Equal(t, "reason", tev.GetReason()) + assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier()) + assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName()) + assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo()) + assert.Equal(t, testClusterID, tev.GetProducerId()) l := []*core.TaskLog{ {Uri: "x", Name: "y", MessageFormat: core.TaskLog_JSON}, @@ -139,21 +139,21 @@ func TestToTaskExecutionEvent(t *testing.T) { }, }) assert.NoError(t, err) - assert.Equal(t, core.TaskExecution_RUNNING, tev.Phase) - assert.Equal(t, uint32(1), tev.PhaseVersion) - assert.Equal(t, l, tev.Logs) - assert.Equal(t, c, tev.CustomInfo) - assert.Equal(t, np, tev.OccurredAt) - assert.Equal(t, tkID, tev.TaskId) - assert.Equal(t, nodeID, tev.ParentNodeExecutionId) + assert.Equal(t, core.TaskExecution_RUNNING, tev.GetPhase()) + assert.Equal(t, uint32(1), tev.GetPhaseVersion()) + assert.Equal(t, l, tev.GetLogs()) + assert.Equal(t, c, tev.GetCustomInfo()) + assert.Equal(t, np, tev.GetOccurredAt()) + assert.Equal(t, tkID, tev.GetTaskId()) + assert.Equal(t, nodeID, tev.GetParentNodeExecutionId()) assert.Equal(t, inputPath, tev.GetInputUri()) - assert.Nil(t, tev.OutputResult) - assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.Metadata.InstanceClass) - assert.Equal(t, containerTaskType, tev.TaskType) - assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier) - assert.Equal(t, generatedName, tev.Metadata.GeneratedName) - assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo) - assert.Equal(t, testClusterID, tev.ProducerId) + assert.Nil(t, tev.GetOutputResult()) + assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.GetMetadata().GetInstanceClass()) + assert.Equal(t, containerTaskType, tev.GetTaskType()) + assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier()) + assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName()) + assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo()) + assert.Equal(t, testClusterID, 
tev.GetProducerId()) defaultNodeExecutionMetadata := nodemocks.NodeExecutionMetadata{} defaultNodeExecutionMetadata.OnIsInterruptible().Return(false) @@ -177,23 +177,23 @@ func TestToTaskExecutionEvent(t *testing.T) { }, }) assert.NoError(t, err) - assert.Equal(t, core.TaskExecution_SUCCEEDED, tev.Phase) - assert.Equal(t, uint32(0), tev.PhaseVersion) - assert.Equal(t, l, tev.Logs) - assert.Equal(t, c, tev.CustomInfo) - assert.Equal(t, np, tev.OccurredAt) - assert.Equal(t, np, tev.OccurredAt) - assert.Equal(t, tkID, tev.TaskId) - assert.Equal(t, nodeID, tev.ParentNodeExecutionId) - assert.NotNil(t, tev.OutputResult) + assert.Equal(t, core.TaskExecution_SUCCEEDED, tev.GetPhase()) + assert.Equal(t, uint32(0), tev.GetPhaseVersion()) + assert.Equal(t, l, tev.GetLogs()) + assert.Equal(t, c, tev.GetCustomInfo()) + assert.Equal(t, np, tev.GetOccurredAt()) + assert.Equal(t, np, tev.GetOccurredAt()) + assert.Equal(t, tkID, tev.GetTaskId()) + assert.Equal(t, nodeID, tev.GetParentNodeExecutionId()) + assert.NotNil(t, tev.GetOutputResult()) assert.Equal(t, inputPath, tev.GetInputUri()) assert.Equal(t, outputPath, tev.GetOutputUri()) - assert.Empty(t, event.TaskExecutionMetadata_DEFAULT, tev.Metadata.InstanceClass) - assert.Equal(t, containerTaskType, tev.TaskType) - assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier) - assert.Equal(t, generatedName, tev.Metadata.GeneratedName) - assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo) - assert.Equal(t, testClusterID, tev.ProducerId) + assert.Empty(t, event.TaskExecutionMetadata_DEFAULT, tev.GetMetadata().GetInstanceClass()) + assert.Equal(t, containerTaskType, tev.GetTaskType()) + assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier()) + assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName()) + assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo()) + assert.Equal(t, testClusterID, tev.GetProducerId()) t.Run("inline event policy", func(t *testing.T) { inputs := &core.LiteralMap{ @@ -297,21 +297,21 @@ func TestToTaskExecutionEventWithParent(t *testing.T) { expectedNodeID := &core.NodeExecutionIdentifier{ NodeId: "fmxzd5ta", } - assert.Nil(t, tev.Logs) - assert.Equal(t, core.TaskExecution_WAITING_FOR_RESOURCES, tev.Phase) - assert.Equal(t, uint32(0), tev.PhaseVersion) - assert.Equal(t, np, tev.OccurredAt) - assert.Equal(t, tkID, tev.TaskId) - assert.Equal(t, expectedNodeID, tev.ParentNodeExecutionId) + assert.Nil(t, tev.GetLogs()) + assert.Equal(t, core.TaskExecution_WAITING_FOR_RESOURCES, tev.GetPhase()) + assert.Equal(t, uint32(0), tev.GetPhaseVersion()) + assert.Equal(t, np, tev.GetOccurredAt()) + assert.Equal(t, tkID, tev.GetTaskId()) + assert.Equal(t, expectedNodeID, tev.GetParentNodeExecutionId()) assert.Equal(t, inputPath, tev.GetInputUri()) - assert.Nil(t, tev.OutputResult) - assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.Metadata.InstanceClass) - assert.Equal(t, containerTaskType, tev.TaskType) - assert.Equal(t, "reason", tev.Reason) - assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier) - assert.Equal(t, generatedName, tev.Metadata.GeneratedName) - assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo) - assert.Equal(t, testClusterID, tev.ProducerId) + assert.Nil(t, tev.GetOutputResult()) + assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.GetMetadata().GetInstanceClass()) + assert.Equal(t, containerTaskType, tev.GetTaskType()) + assert.Equal(t, "reason", tev.GetReason()) + 
assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier()) + assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName()) + assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo()) + assert.Equal(t, testClusterID, tev.GetProducerId()) l := []*core.TaskLog{ {Uri: "x", Name: "y", MessageFormat: core.TaskLog_JSON}, @@ -337,19 +337,19 @@ func TestToTaskExecutionEventWithParent(t *testing.T) { }, }) assert.NoError(t, err) - assert.Equal(t, core.TaskExecution_RUNNING, tev.Phase) - assert.Equal(t, uint32(1), tev.PhaseVersion) - assert.Equal(t, l, tev.Logs) - assert.Equal(t, c, tev.CustomInfo) - assert.Equal(t, np, tev.OccurredAt) - assert.Equal(t, tkID, tev.TaskId) - assert.Equal(t, expectedNodeID, tev.ParentNodeExecutionId) + assert.Equal(t, core.TaskExecution_RUNNING, tev.GetPhase()) + assert.Equal(t, uint32(1), tev.GetPhaseVersion()) + assert.Equal(t, l, tev.GetLogs()) + assert.Equal(t, c, tev.GetCustomInfo()) + assert.Equal(t, np, tev.GetOccurredAt()) + assert.Equal(t, tkID, tev.GetTaskId()) + assert.Equal(t, expectedNodeID, tev.GetParentNodeExecutionId()) assert.Equal(t, inputPath, tev.GetInputUri()) - assert.Nil(t, tev.OutputResult) - assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.Metadata.InstanceClass) - assert.Equal(t, containerTaskType, tev.TaskType) - assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier) - assert.Equal(t, generatedName, tev.Metadata.GeneratedName) - assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo) - assert.Equal(t, testClusterID, tev.ProducerId) + assert.Nil(t, tev.GetOutputResult()) + assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.GetMetadata().GetInstanceClass()) + assert.Equal(t, containerTaskType, tev.GetTaskType()) + assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier()) + assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName()) + assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo()) + assert.Equal(t, testClusterID, tev.GetProducerId()) } diff --git a/flytepropeller/pkg/controller/nodes/task_reader.go b/flytepropeller/pkg/controller/nodes/task_reader.go index 5cc5654f63..baf8123944 100644 --- a/flytepropeller/pkg/controller/nodes/task_reader.go +++ b/flytepropeller/pkg/controller/nodes/task_reader.go @@ -12,7 +12,7 @@ type taskReader struct { } func (t taskReader) GetTaskType() v1alpha1.TaskType { - return t.TaskTemplate.Type + return t.TaskTemplate.GetType() } func (t taskReader) GetTaskID() *core.Identifier { diff --git a/flytepropeller/pkg/controller/nodes/transformers.go b/flytepropeller/pkg/controller/nodes/transformers.go index a252d17344..ceeaf5aaec 100644 --- a/flytepropeller/pkg/controller/nodes/transformers.go +++ b/flytepropeller/pkg/controller/nodes/transformers.go @@ -91,7 +91,7 @@ func ToNodeExecutionEvent( return nil, nil } if info.GetPhase() == handler.EPhaseUndefined { - return nil, fmt.Errorf("illegal state, undefined phase received for node [%s]", nodeExecID.NodeId) + return nil, fmt.Errorf("illegal state, undefined phase received for node [%s]", nodeExecID.GetNodeId()) } occurredTime, err := ptypes.TimestampProto(info.GetOccurredAt()) if err != nil { @@ -115,7 +115,7 @@ func ToNodeExecutionEvent( // Start node is special case where the Outputs are the same and hence here we copy the Output file // into the OutputResult and in admin we copy it over into input as well. // Start node doesn't have inputs. 
- if nodeExecID.NodeId == v1alpha1.StartNodeID { + if nodeExecID.GetNodeId() == v1alpha1.StartNodeID { outputsFile := v1alpha1.GetOutputsFile(status.GetOutputDir()) nev = &event.NodeExecutionEvent{ Id: nodeExecID, @@ -162,7 +162,7 @@ func ToNodeExecutionEvent( } if eventVersion != v1alpha1.EventVersion0 { - currentNodeUniqueID, err := common.GenerateUniqueID(parentInfo, nev.Id.NodeId) + currentNodeUniqueID, err := common.GenerateUniqueID(parentInfo, nev.GetId().GetNodeId()) if err != nil { return nil, err } @@ -210,7 +210,7 @@ func ToNodeExecutionEvent( } } else if dynamicNodePhase != v1alpha1.DynamicNodePhaseNone { nev.IsDynamic = true - if nev.GetTaskNodeMetadata() != nil && nev.GetTaskNodeMetadata().DynamicWorkflow != nil { + if nev.GetTaskNodeMetadata() != nil && nev.GetTaskNodeMetadata().GetDynamicWorkflow() != nil { nev.IsParent = true } } @@ -314,6 +314,7 @@ func UpdateNodeStatus(np v1alpha1.NodePhase, p handler.PhaseInfo, n interfaces.N t.SetSubNodeTaskPhases(na.SubNodeTaskPhases) t.SetSubNodeRetryAttempts(na.SubNodeRetryAttempts) t.SetSubNodeSystemFailures(na.SubNodeSystemFailures) + t.SetSubNodeDeltaTimestamps(na.SubNodeDeltaTimestamps) t.SetTaskPhaseVersion(na.TaskPhaseVersion) } } diff --git a/flytepropeller/pkg/controller/nodes/transformers_test.go b/flytepropeller/pkg/controller/nodes/transformers_test.go index 93a532a8d6..0bbc02f123 100644 --- a/flytepropeller/pkg/controller/nodes/transformers_test.go +++ b/flytepropeller/pkg/controller/nodes/transformers_test.go @@ -56,10 +56,10 @@ func TestToNodeExecutionEvent(t *testing.T) { RawOutputPolicy: config.RawOutputPolicyReference, }, nil) assert.NoError(t, err) - assert.True(t, nev.IsDynamic) - assert.True(t, nev.IsParent) - assert.Equal(t, nodeExecutionEventVersion, nev.EventVersion) - assert.True(t, nev.IsInDynamicChain) + assert.True(t, nev.GetIsDynamic()) + assert.True(t, nev.GetIsParent()) + assert.Equal(t, nodeExecutionEventVersion, nev.GetEventVersion()) + assert.True(t, nev.GetIsInDynamicChain()) }) t.Run("is parent", func(t *testing.T) { info := handler.PhaseInfoDynamicRunning(&handler.ExecutionInfo{TaskNodeInfo: &handler.TaskNodeInfo{ @@ -92,9 +92,9 @@ func TestToNodeExecutionEvent(t *testing.T) { RawOutputPolicy: config.RawOutputPolicyReference, }, nil) assert.NoError(t, err) - assert.False(t, nev.IsDynamic) - assert.True(t, nev.IsParent) - assert.Equal(t, nodeExecutionEventVersion, nev.EventVersion) + assert.False(t, nev.GetIsDynamic()) + assert.True(t, nev.GetIsParent()) + assert.Equal(t, nodeExecutionEventVersion, nev.GetEventVersion()) }) t.Run("inline events", func(t *testing.T) { inputs := &core.LiteralMap{ diff --git a/flytepropeller/pkg/controller/rate_limiter.go b/flytepropeller/pkg/controller/rate_limiter.go new file mode 100644 index 0000000000..100d6aa82c --- /dev/null +++ b/flytepropeller/pkg/controller/rate_limiter.go @@ -0,0 +1,116 @@ +package controller + +import ( + "context" + "sync" + "time" + + "golang.org/x/time/rate" + "k8s.io/client-go/util/workqueue" + + "github.com/flyteorg/flyte/flytepropeller/pkg/controller/interfaces" +) + +// limiterAdapter adapts rate.NewLimiter to use the Reservation interface so that it can be used in unittests. 
+type limiterAdapter struct { + limiter *rate.Limiter +} + +func NewLimiter(r rate.Limit, b int) interfaces.Limiter { + return &limiterAdapter{rate.NewLimiter(r, b)} +} + +func (l *limiterAdapter) Allow() bool { + return l.limiter.Allow() +} + +func (l *limiterAdapter) AllowN(t time.Time, n int) bool { + return l.limiter.AllowN(t, n) +} + +func (l *limiterAdapter) Burst() int { + return l.limiter.Burst() +} + +func (l *limiterAdapter) Limit() rate.Limit { + return l.limiter.Limit() +} + +func (l *limiterAdapter) Reserve() interfaces.Reservation { + return l.limiter.Reserve() +} + +func (l *limiterAdapter) ReserveN(t time.Time, n int) interfaces.Reservation { + return l.limiter.ReserveN(t, n) +} +func (l *limiterAdapter) SetBurst(newBurst int) { + l.limiter.SetBurst(newBurst) +} + +func (l *limiterAdapter) SetBurstAt(t time.Time, newBurst int) { + l.limiter.SetBurstAt(t, newBurst) +} + +func (l *limiterAdapter) SetLimit(newLimit rate.Limit) { + l.limiter.SetLimit(newLimit) +} + +func (l *limiterAdapter) SetLimitAt(t time.Time, newLimit rate.Limit) { + l.limiter.SetLimitAt(t, newLimit) +} + +func (l *limiterAdapter) Tokens() float64 { + return l.limiter.Tokens() +} + +func (l *limiterAdapter) TokensAt(t time.Time) float64 { + return l.limiter.TokensAt(t) +} + +func (l *limiterAdapter) Wait(ctx context.Context) (err error) { + return l.limiter.Wait(ctx) +} + +func (l *limiterAdapter) WaitN(ctx context.Context, n int) (err error) { + return l.limiter.WaitN(ctx, n) +} + +// Similar to the standard BucketRateLimiter but dedupes items in order to avoid reserving token slots for the +// same item multiple times. Intended to be used with a DelayingQueue, which dedupes items on insertion. +type dedupingBucketRateLimiter struct { + Limiter interfaces.Limiter + mu sync.Mutex + reservations map[interface{}]interfaces.Reservation +} + +func NewDedupingBucketRateLimiter(limiter interfaces.Limiter) workqueue.RateLimiter { + return &dedupingBucketRateLimiter{ + Limiter: limiter, + reservations: make(map[interface{}]interfaces.Reservation), + } +} + +var _ workqueue.RateLimiter = &dedupingBucketRateLimiter{} + +func (r *dedupingBucketRateLimiter) When(item interface{}) time.Duration { + r.mu.Lock() + defer r.mu.Unlock() + // Check if this item has an outstanding reservation. If so, use it to avoid a duplicate reservation.
+ if res, ok := r.reservations[item]; ok && res.Delay() > 0 { + return res.Delay() + } + r.reservations[item] = r.Limiter.Reserve() + return r.reservations[item].Delay() +} + +func (r *dedupingBucketRateLimiter) NumRequeues(item interface{}) int { + return 0 +} + +func (r *dedupingBucketRateLimiter) Forget(item interface{}) { + r.mu.Lock() + defer r.mu.Unlock() + if res, ok := r.reservations[item]; ok && res.Delay() <= 0 { + delete(r.reservations, item) + } +} diff --git a/flytepropeller/pkg/controller/rate_limiter_test.go b/flytepropeller/pkg/controller/rate_limiter_test.go new file mode 100644 index 0000000000..16e5bae417 --- /dev/null +++ b/flytepropeller/pkg/controller/rate_limiter_test.go @@ -0,0 +1,98 @@ +package controller + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/flyteorg/flyte/flytepropeller/pkg/controller/mocks" +) + +type rateLimiterTests struct { + suite.Suite + limiter *mocks.Limiter + deduping *dedupingBucketRateLimiter +} + +func TestDedupingBucketRateLimiter(t *testing.T) { + suite.Run(t, &rateLimiterTests{}) +} + +func (s *rateLimiterTests) SetupTest() { + s.limiter = mocks.NewLimiter(s.T()) + s.deduping = NewDedupingBucketRateLimiter(s.limiter).(*dedupingBucketRateLimiter) +} + +func (s *rateLimiterTests) TearDownTest() { + s.limiter.AssertExpectations(s.T()) +} + +func (s *rateLimiterTests) Test_When_NotFound() { + newReservation := mocks.NewReservation(s.T()) + defer newReservation.AssertExpectations(s.T()) + newReservation.EXPECT().Delay().Return(time.Minute).Once() + s.limiter.EXPECT().Reserve().Return(newReservation).Once() + + d := s.deduping.When("item1") + + assert.Equal(s.T(), newReservation, s.deduping.reservations["item1"]) + assert.Equal(s.T(), time.Minute, d) +} + +func (s *rateLimiterTests) Test_When_FoundPast() { + pastReservation := mocks.NewReservation(s.T()) + defer pastReservation.AssertExpectations(s.T()) + pastReservation.EXPECT().Delay().Return(-time.Minute).Once() + s.deduping.reservations["item1"] = pastReservation + newReservation := mocks.NewReservation(s.T()) + defer newReservation.AssertExpectations(s.T()) + newReservation.EXPECT().Delay().Return(time.Minute).Once() + s.limiter.EXPECT().Reserve().Return(newReservation).Once() + + d := s.deduping.When("item1") + + assert.Equal(s.T(), newReservation, s.deduping.reservations["item1"]) + assert.Equal(s.T(), time.Minute, d) +} + +func (s *rateLimiterTests) Test_When_FoundFuture() { + futureReservation := mocks.NewReservation(s.T()) + defer futureReservation.AssertExpectations(s.T()) + futureReservation.EXPECT().Delay().Return(time.Minute).Twice() + s.deduping.reservations["item1"] = futureReservation + + d := s.deduping.When("item1") + + assert.Equal(s.T(), futureReservation, s.deduping.reservations["item1"]) + assert.Equal(s.T(), time.Minute, d) +} + +func (s *rateLimiterTests) Test_Forget_NotFound() { + s.deduping.Forget("item1") + + assert.NotContains(s.T(), s.deduping.reservations, "item1") +} + +func (s *rateLimiterTests) Test_Forget_PastReservation() { + pastReservation := mocks.NewReservation(s.T()) + defer pastReservation.AssertExpectations(s.T()) + pastReservation.EXPECT().Delay().Return(-time.Minute).Once() + s.deduping.reservations["item1"] = pastReservation + + s.deduping.Forget("item1") + + assert.NotContains(s.T(), s.deduping.reservations, "item1") +} + +func (s *rateLimiterTests) Test_Forget_FutureReservation() { + futureReservation := mocks.NewReservation(s.T()) + defer 
futureReservation.AssertExpectations(s.T()) + futureReservation.EXPECT().Delay().Return(time.Minute).Once() + s.deduping.reservations["item1"] = futureReservation + + s.deduping.Forget("item1") + + assert.Equal(s.T(), futureReservation, s.deduping.reservations["item1"]) +} diff --git a/flytepropeller/pkg/controller/workflow/executor.go b/flytepropeller/pkg/controller/workflow/executor.go index 1982b405cb..14a3770cff 100644 --- a/flytepropeller/pkg/controller/workflow/executor.go +++ b/flytepropeller/pkg/controller/workflow/executor.go @@ -295,7 +295,7 @@ func (c *workflowExecutor) IdempotentReportEvent(ctx context.Context, e *event.W err := c.wfRecorder.RecordWorkflowEvent(ctx, e, c.eventConfig) if err != nil && eventsErr.IsAlreadyExists(err) { logger.Infof(ctx, "Workflow event phase: %s, executionId %s already exist", - e.Phase.String(), e.ExecutionId) + e.GetPhase().String(), e.GetExecutionId()) return nil } return err @@ -370,21 +370,21 @@ func (c *workflowExecutor) TransitionToPhase(ctx context.Context, execID *core.W if recordingErr := c.IdempotentReportEvent(ctx, wfEvent); recordingErr != nil { if eventsErr.IsAlreadyExists(recordingErr) { - logger.Warningf(ctx, "Failed to record workflowEvent, error [%s]. Trying to record state: %s. Ignoring this error!", recordingErr.Error(), wfEvent.Phase) + logger.Warningf(ctx, "Failed to record workflowEvent, error [%s]. Trying to record state: %s. Ignoring this error!", recordingErr.Error(), wfEvent.GetPhase()) return nil } if eventsErr.IsEventAlreadyInTerminalStateError(recordingErr) { // Move to WorkflowPhaseFailed for state mismatch - msg := fmt.Sprintf("workflow state mismatch between propeller and control plane; Propeller State: %s, ExecutionId %s", wfEvent.Phase.String(), wfEvent.ExecutionId) + msg := fmt.Sprintf("workflow state mismatch between propeller and control plane; Propeller State: %s, ExecutionId %s", wfEvent.GetPhase().String(), wfEvent.GetExecutionId()) logger.Warningf(ctx, msg) wStatus.UpdatePhase(v1alpha1.WorkflowPhaseFailed, msg, nil) return nil } - if (wfEvent.Phase == core.WorkflowExecution_FAILING || wfEvent.Phase == core.WorkflowExecution_FAILED) && + if (wfEvent.GetPhase() == core.WorkflowExecution_FAILING || wfEvent.GetPhase() == core.WorkflowExecution_FAILED) && (eventsErr.IsNotFound(recordingErr) || eventsErr.IsEventIncompatibleClusterError(recordingErr)) { // Don't stall the workflow transition to terminated (so that resources can be cleaned up) since these events // are being discarded by the back-end anyways. - logger.Infof(ctx, "Failed to record %s workflowEvent, error [%s]. Ignoring this error!", wfEvent.Phase.String(), recordingErr.Error()) + logger.Infof(ctx, "Failed to record %s workflowEvent, error [%s]. Ignoring this error!", wfEvent.GetPhase().String(), recordingErr.Error()) return nil } logger.Warningf(ctx, "Event recording failed. Error [%s]", recordingErr.Error()) @@ -461,7 +461,7 @@ func (c *workflowExecutor) HandleFlyteWorkflow(ctx context.Context, w *v1alpha1. 
case v1alpha1.WorkflowPhaseHandlingFailureNode: newStatus, err := c.handleFailureNode(ctx, w) if err != nil { - return errors.Errorf("failed to handle failure node for workflow [%s], err: [%s]", w.ID, err.Error()) + return errors.Errorf("failed to handle failure node for workflow [%s], err: [%s]", w.ID, err.Error()) //nolint:govet,staticcheck } failureErr := c.TransitionToPhase(ctx, w.ExecutionID.WorkflowExecutionIdentifier, wStatus, newStatus) // Ignore ExecutionNotFound and IncompatibleCluster errors to allow graceful failure diff --git a/flytepropeller/pkg/controller/workflow/executor_test.go b/flytepropeller/pkg/controller/workflow/executor_test.go index 2be7238dbb..187aac7ead 100644 --- a/flytepropeller/pkg/controller/workflow/executor_test.go +++ b/flytepropeller/pkg/controller/workflow/executor_test.go @@ -88,14 +88,14 @@ func (f fakeRemoteWritePlugin) Handle(ctx context.Context, tCtx pluginCore.TaskE if trns.Info().Phase() == pluginCore.PhaseSuccess { tk, err := tCtx.TaskReader().Read(ctx) assert.NoError(f.t, err) - outputVars := tk.GetInterface().Outputs.Variables + outputVars := tk.GetInterface().GetOutputs().GetVariables() o := &core.LiteralMap{ Literals: make(map[string]*core.Literal, len(outputVars)), } for k, v := range outputVars { - l, err := coreutils.MakeDefaultLiteralForType(v.Type) + l, err := coreutils.MakeDefaultLiteralForType(v.GetType()) if f.enableAsserts && !assert.NoError(f.t, err) { - assert.FailNow(f.t, "Failed to create default output for node [%v] Type [%v]", tCtx.TaskExecutionMetadata().GetTaskExecutionID(), v.Type) + assert.FailNow(f.t, "Failed to create default output for node [%v] Type [%v]", tCtx.TaskExecutionMetadata().GetTaskExecutionID(), v.GetType()) } o.Literals[k] = l } @@ -488,21 +488,21 @@ func TestWorkflowExecutor_HandleFlyteWorkflow_Failing(t *testing.T) { if ok { assert.True(t, ok) - switch e.Phase { + switch e.GetPhase() { case core.WorkflowExecution_RUNNING: - assert.WithinDuration(t, e.OccurredAt.AsTime(), time.Now(), time.Millisecond*5) - assert.Equal(t, testClusterID, e.ProducerId) + assert.WithinDuration(t, e.GetOccurredAt().AsTime(), time.Now(), time.Millisecond*5) + assert.Equal(t, testClusterID, e.GetProducerId()) recordedRunning = true case core.WorkflowExecution_FAILING: - assert.WithinDuration(t, e.OccurredAt.AsTime(), time.Now(), time.Millisecond*5) - assert.Equal(t, testClusterID, e.ProducerId) + assert.WithinDuration(t, e.GetOccurredAt().AsTime(), time.Now(), time.Millisecond*5) + assert.Equal(t, testClusterID, e.GetProducerId()) recordedFailing = true case core.WorkflowExecution_FAILED: - assert.WithinDuration(t, e.OccurredAt.AsTime(), time.Now(), time.Millisecond*5) - assert.Equal(t, testClusterID, e.ProducerId) + assert.WithinDuration(t, e.GetOccurredAt().AsTime(), time.Now(), time.Millisecond*5) + assert.Equal(t, testClusterID, e.GetProducerId()) recordedFailed = true default: - return fmt.Errorf("MockWorkflowRecorder should not have entered into any other states [%v]", e.Phase) + return fmt.Errorf("MockWorkflowRecorder should not have entered into any other states [%v]", e.GetPhase()) } } return nil @@ -591,30 +591,30 @@ func TestWorkflowExecutor_HandleFlyteWorkflow_Events(t *testing.T) { eventSink.SinkCb = func(ctx context.Context, message proto.Message) error { e, ok := message.(*event.WorkflowExecutionEvent) if ok { - switch e.Phase { + switch e.GetPhase() { case core.WorkflowExecution_RUNNING: - occuredAt, err := ptypes.Timestamp(e.OccurredAt) + occuredAt, err := ptypes.Timestamp(e.GetOccurredAt()) assert.NoError(t, 
err) assert.WithinDuration(t, occuredAt, time.Now(), time.Millisecond*5) - assert.Equal(t, testClusterID, e.ProducerId) + assert.Equal(t, testClusterID, e.GetProducerId()) recordedRunning = true case core.WorkflowExecution_SUCCEEDING: - occuredAt, err := ptypes.Timestamp(e.OccurredAt) + occuredAt, err := ptypes.Timestamp(e.GetOccurredAt()) assert.NoError(t, err) assert.WithinDuration(t, occuredAt, time.Now(), time.Millisecond*5) - assert.Equal(t, testClusterID, e.ProducerId) + assert.Equal(t, testClusterID, e.GetProducerId()) recordedFailing = true case core.WorkflowExecution_SUCCEEDED: - occuredAt, err := ptypes.Timestamp(e.OccurredAt) + occuredAt, err := ptypes.Timestamp(e.GetOccurredAt()) assert.NoError(t, err) assert.WithinDuration(t, occuredAt, time.Now(), time.Millisecond*5) - assert.Equal(t, testClusterID, e.ProducerId) + assert.Equal(t, testClusterID, e.GetProducerId()) recordedSuccess = true default: - return fmt.Errorf("MockWorkflowRecorder should not have entered into any other states, received [%v]", e.Phase.String()) + return fmt.Errorf("MockWorkflowRecorder should not have entered into any other states, received [%v]", e.GetPhase().String()) } } return nil @@ -819,7 +819,7 @@ func TestWorkflowExecutor_HandleAbortedWorkflow(t *testing.T) { nodeExec := &nodemocks.Node{} wfRecorder := &eventMocks.WorkflowEventRecorder{} wfRecorder.On("RecordWorkflowEvent", mock.Anything, mock.MatchedBy(func(ev *event.WorkflowExecutionEvent) bool { - assert.Equal(t, testClusterID, ev.ProducerId) + assert.Equal(t, testClusterID, ev.GetProducerId()) evs = append(evs, ev) return true }), mock.Anything).Return(nil) @@ -861,7 +861,7 @@ func TestWorkflowExecutor_HandleAbortedWorkflow(t *testing.T) { nodeExec := &nodemocks.Node{} wfRecorder := &eventMocks.WorkflowEventRecorder{} wfRecorder.OnRecordWorkflowEventMatch(mock.Anything, mock.MatchedBy(func(ev *event.WorkflowExecutionEvent) bool { - assert.Equal(t, testClusterID, ev.ProducerId) + assert.Equal(t, testClusterID, ev.GetProducerId()) evs = append(evs, ev) return true }), mock.Anything).Return(nil) @@ -902,7 +902,7 @@ func TestWorkflowExecutor_HandleAbortedWorkflow(t *testing.T) { nodeExec := &nodemocks.Node{} wfRecorder := &eventMocks.WorkflowEventRecorder{} wfRecorder.OnRecordWorkflowEventMatch(mock.Anything, mock.MatchedBy(func(ev *event.WorkflowExecutionEvent) bool { - assert.Equal(t, testClusterID, ev.ProducerId) + assert.Equal(t, testClusterID, ev.GetProducerId()) evs = append(evs, ev) return true }), mock.Anything).Return(nil) diff --git a/flytepropeller/pkg/controller/workflowstore/execution_stats_test.go b/flytepropeller/pkg/controller/workflowstore/execution_stats_test.go index 783e2ba688..b3c7bb44fe 100644 --- a/flytepropeller/pkg/controller/workflowstore/execution_stats_test.go +++ b/flytepropeller/pkg/controller/workflowstore/execution_stats_test.go @@ -89,7 +89,7 @@ func TestConcurrentAccess(t *testing.T) { go func(id int) { defer wg.Done() execID := fmt.Sprintf("exec%d", id) - err := esh.AddOrUpdateEntry(execID, SingleExecutionStats{ActiveNodeCount: uint32(id), ActiveTaskCount: uint32(id * 2)}) + err := esh.AddOrUpdateEntry(execID, SingleExecutionStats{ActiveNodeCount: uint32(id), ActiveTaskCount: uint32(id * 2)}) // #nosec G115 assert.NoError(t, err) }(i) } diff --git a/flytepropeller/pkg/controller/workqueue.go b/flytepropeller/pkg/controller/workqueue.go index 1d10cb5f2a..fcf97c3e1c 100644 --- a/flytepropeller/pkg/controller/workqueue.go +++ b/flytepropeller/pkg/controller/workqueue.go @@ -18,10 +18,8 @@ func NewWorkQueue(ctx 
context.Context, cfg config.WorkqueueConfig, name string) case config.WorkqueueTypeBucketRateLimiter: logger.Infof(ctx, "Using Bucket Ratelimited Workqueue, Rate [%v] Capacity [%v]", cfg.Rate, cfg.Capacity) return workqueue.NewNamedRateLimitingQueue( - // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &workqueue.BucketRateLimiter{ - Limiter: rate.NewLimiter(rate.Limit(cfg.Rate), cfg.Capacity), - }, name), nil + NewDedupingBucketRateLimiter(NewLimiter(rate.Limit(cfg.Rate), cfg.Capacity)), + name), nil case config.WorkqueueTypeExponentialFailureRateLimiter: logger.Infof(ctx, "Using Exponential failure backoff Ratelimited Workqueue, Base Delay [%v], max Delay [%v]", cfg.BaseDelay, cfg.MaxDelay) return workqueue.NewNamedRateLimitingQueue( @@ -31,9 +29,7 @@ func NewWorkQueue(ctx context.Context, cfg config.WorkqueueConfig, name string) logger.Infof(ctx, "Using Max-of Ratelimited Workqueue, Bucket {Rate [%v] Capacity [%v]} | FailureBackoff {Base Delay [%v], max Delay [%v]}", cfg.Rate, cfg.Capacity, cfg.BaseDelay, cfg.MaxDelay) return workqueue.NewNamedRateLimitingQueue( workqueue.NewMaxOfRateLimiter( - &workqueue.BucketRateLimiter{ - Limiter: rate.NewLimiter(rate.Limit(cfg.Rate), cfg.Capacity), - }, + NewDedupingBucketRateLimiter(NewLimiter(rate.Limit(cfg.Rate), cfg.Capacity)), workqueue.NewItemExponentialFailureRateLimiter(cfg.BaseDelay.Duration, cfg.MaxDelay.Duration), ), name), nil diff --git a/flytepropeller/pkg/utils/assert/literals.go b/flytepropeller/pkg/utils/assert/literals.go index 66f57c328e..c0fac675ed 100644 --- a/flytepropeller/pkg/utils/assert/literals.go +++ b/flytepropeller/pkg/utils/assert/literals.go @@ -16,14 +16,14 @@ func EqualPrimitive(t *testing.T, p1 *core.Primitive, p2 *core.Primitive) { if p1 == nil { return } - assert.Equal(t, reflect.TypeOf(p1.Value), reflect.TypeOf(p2.Value)) - switch p1.Value.(type) { + assert.Equal(t, reflect.TypeOf(p1.GetValue()), reflect.TypeOf(p2.GetValue())) + switch p1.GetValue().(type) { case *core.Primitive_Integer: assert.Equal(t, p1.GetInteger(), p2.GetInteger()) case *core.Primitive_StringValue: assert.Equal(t, p1.GetStringValue(), p2.GetStringValue()) default: - assert.FailNow(t, "Not yet implemented for types %v", reflect.TypeOf(p1.Value)) + assert.FailNow(t, "Not yet implemented for types %v", reflect.TypeOf(p1.GetValue())) } } @@ -34,12 +34,12 @@ func EqualScalar(t *testing.T, p1 *core.Scalar, p2 *core.Scalar) { if p1 == nil { return } - assert.Equal(t, reflect.TypeOf(p1.Value), reflect.TypeOf(p2.Value)) - switch p1.Value.(type) { + assert.Equal(t, reflect.TypeOf(p1.GetValue()), reflect.TypeOf(p2.GetValue())) + switch p1.GetValue().(type) { case *core.Scalar_Primitive: EqualPrimitive(t, p1.GetPrimitive(), p2.GetPrimitive()) default: - assert.FailNow(t, "Not yet implemented for types %v", reflect.TypeOf(p1.Value)) + assert.FailNow(t, "Not yet implemented for types %v", reflect.TypeOf(p1.GetValue())) } } @@ -50,8 +50,8 @@ func EqualLiterals(t *testing.T, l1 *core.Literal, l2 *core.Literal) { if l1 == nil { return } - assert.Equal(t, reflect.TypeOf(l1.Value), reflect.TypeOf(l2.Value)) - switch l1.Value.(type) { + assert.Equal(t, reflect.TypeOf(l1.GetValue()), reflect.TypeOf(l2.GetValue())) + switch l1.GetValue().(type) { case *core.Literal_Scalar: EqualScalar(t, l1.GetScalar(), l2.GetScalar()) case *core.Literal_Map: @@ -63,9 +63,9 @@ func EqualLiterals(t *testing.T, l1 *core.Literal, l2 *core.Literal) { func EqualLiteralMap(t *testing.T, l1 *core.LiteralMap, l2 *core.LiteralMap) { 
if assert.NotNil(t, l1, "l1 is nil") && assert.NotNil(t, l2, "l2 is nil") { - assert.Equal(t, len(l1.Literals), len(l2.Literals)) - for k, v := range l1.Literals { - actual, ok := l2.Literals[k] + assert.Equal(t, len(l1.GetLiterals()), len(l2.GetLiterals())) + for k, v := range l1.GetLiterals() { + actual, ok := l2.GetLiterals()[k] assert.True(t, ok) EqualLiterals(t, v, actual) } @@ -74,9 +74,9 @@ func EqualLiteralMap(t *testing.T, l1 *core.LiteralMap, l2 *core.LiteralMap) { func EqualLiteralCollection(t *testing.T, l1 *core.LiteralCollection, l2 *core.LiteralCollection) { if assert.NotNil(t, l2) { - assert.Equal(t, len(l1.Literals), len(l2.Literals)) - for i, v := range l1.Literals { - EqualLiterals(t, v, l2.Literals[i]) + assert.Equal(t, len(l1.GetLiterals()), len(l2.GetLiterals())) + for i, v := range l1.GetLiterals() { + EqualLiterals(t, v, l2.GetLiterals()[i]) } } } diff --git a/flytepropeller/pkg/utils/bindings_test.go b/flytepropeller/pkg/utils/bindings_test.go index c6cb5fcc12..8067e256eb 100644 --- a/flytepropeller/pkg/utils/bindings_test.go +++ b/flytepropeller/pkg/utils/bindings_test.go @@ -18,7 +18,7 @@ func TestMakePrimitiveBinding(t *testing.T) { assert.Equal(t, "x", x.GetVar()) p := x.GetBinding() assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue()) } { @@ -36,7 +36,7 @@ func TestMustMakePrimitiveBinding(t *testing.T) { assert.Equal(t, "x", x.GetVar()) p := x.GetBinding() assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue()) } { @@ -62,26 +62,26 @@ func TestMakeBindingDataCollection(t *testing.T) { ) assert.NotNil(t, c.GetCollection()) - assert.Equal(t, 2, len(c.GetCollection().Bindings)) + assert.Equal(t, 2, len(c.GetCollection().GetBindings())) { p := c.GetCollection().GetBindings()[0] assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v1, p.GetScalar().GetPrimitive().GetInteger()) } { p := c.GetCollection().GetBindings()[1] assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v2, p.GetScalar().GetPrimitive().GetStringValue()) } assert.NotNil(t, c2.GetCollection()) - assert.Equal(t, 2, len(c2.GetCollection().Bindings)) + assert.Equal(t, 2, len(c2.GetCollection().GetBindings())) { p := c2.GetCollection().GetBindings()[0] assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v1, p.GetScalar().GetPrimitive().GetInteger()) } { @@ -113,7 +113,7 @@ func TestMakeBindingDataMap(t *testing.T) { { p := 
m.GetMap().GetBindings()["x"] assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v1, p.GetScalar().GetPrimitive().GetInteger()) } { @@ -127,7 +127,7 @@ func TestMakeBindingDataMap(t *testing.T) { { p := m2.GetMap().GetBindings()["x"] assert.NotNil(t, p.GetScalar()) - assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String()) assert.Equal(t, v1, p.GetScalar().GetPrimitive().GetInteger()) } { diff --git a/flytepropeller/pkg/utils/k8s.go b/flytepropeller/pkg/utils/k8s.go index f666fd9013..7ef53ead38 100644 --- a/flytepropeller/pkg/utils/k8s.go +++ b/flytepropeller/pkg/utils/k8s.go @@ -37,7 +37,7 @@ var invalidDNS1123Characters = regexp.MustCompile("[^-a-z0-9]+") func ToK8sEnvVar(env []*core.KeyValuePair) []v1.EnvVar { envVars := make([]v1.EnvVar, 0, len(env)) for _, kv := range env { - envVars = append(envVars, v1.EnvVar{Name: kv.Key, Value: kv.Value}) + envVars = append(envVars, v1.EnvVar{Name: kv.GetKey(), Value: kv.GetValue()}) } return envVars } diff --git a/flytepropeller/pkg/visualize/visualize.go b/flytepropeller/pkg/visualize/visualize.go index 6a5ee7ba11..14128fde84 100644 --- a/flytepropeller/pkg/visualize/visualize.go +++ b/flytepropeller/pkg/visualize/visualize.go @@ -34,11 +34,11 @@ func flatten(binding *core.BindingData, flatMap map[common.NodeID]sets.String) { flatten(v, flatMap) } case *core.BindingData_Promise: - if _, ok := flatMap[binding.GetPromise().NodeId]; !ok { - flatMap[binding.GetPromise().NodeId] = sets.String{} + if _, ok := flatMap[binding.GetPromise().GetNodeId()]; !ok { + flatMap[binding.GetPromise().GetNodeId()] = sets.String{} } - flatMap[binding.GetPromise().NodeId].Insert(binding.GetPromise().GetVar()) + flatMap[binding.GetPromise().GetNodeId()].Insert(binding.GetPromise().GetVar()) case *core.BindingData_Scalar: if _, ok := flatMap[staticNodeID]; !ok { flatMap[staticNodeID] = sets.NewString() @@ -142,11 +142,11 @@ func WorkflowToGraphViz(g *v1alpha1.FlyteWorkflow) string { func ToGraphViz(g *core.CompiledWorkflow) string { res := fmt.Sprintf("digraph G {rankdir=TB;workflow[label=\"Workflow Id: %v\"];node[style=filled];", - g.Template.GetId()) + g.GetTemplate().GetId()) nodeFinder := func(nodeId common.NodeID) *core.Node { - for _, n := range g.Template.Nodes { - if n.Id == nodeId { + for _, n := range g.GetTemplate().GetNodes() { + if n.GetId() == nodeId { return n } } @@ -204,9 +204,9 @@ func ToGraphViz(g *core.CompiledWorkflow) string { node := nodesToVisit.Deque() nodes, found := g.GetConnections().GetDownstream()[node] if found { - nodesToVisit.Enqueue(nodes.Ids...) + nodesToVisit.Enqueue(nodes.GetIds()...) 
- for _, child := range nodes.Ids { + for _, child := range nodes.GetIds() { label := edgeLabel(node, child) edge := fmt.Sprintf("\"%v\" -> \"%v\" [label=\"%v\",style=\"%v\"];", nodeLabel(node), diff --git a/flytepropeller/pkg/webhook/aws_secret_manager.go b/flytepropeller/pkg/webhook/aws_secret_manager.go index d1595ffc1e..ad5e8c48f4 100644 --- a/flytepropeller/pkg/webhook/aws_secret_manager.go +++ b/flytepropeller/pkg/webhook/aws_secret_manager.go @@ -47,7 +47,7 @@ type AWSSecretManagerInjector struct { } func formatAWSSecretArn(secret *core.Secret) string { - return strings.TrimRight(secret.Group, ":") + ":" + strings.TrimLeft(secret.Key, ":") + return strings.TrimRight(secret.GetGroup(), ":") + ":" + strings.TrimLeft(secret.GetKey(), ":") } func formatAWSInitContainerName(index int) string { @@ -59,12 +59,12 @@ func (i AWSSecretManagerInjector) Type() config.SecretManagerType { } func (i AWSSecretManagerInjector) Inject(ctx context.Context, secret *core.Secret, p *corev1.Pod) (newP *corev1.Pod, injected bool, err error) { - if len(secret.Group) == 0 || len(secret.Key) == 0 { + if len(secret.GetGroup()) == 0 || len(secret.GetKey()) == 0 { return nil, false, fmt.Errorf("AWS Secrets Webhook require both key and group to be set. "+ "Secret: [%v]", secret) } - switch secret.MountRequirement { + switch secret.GetMountRequirement() { case core.Secret_ANY: fallthrough case core.Secret_FILE: @@ -112,7 +112,7 @@ func (i AWSSecretManagerInjector) Inject(ctx context.Context, secret *core.Secre case core.Secret_ENV_VAR: fallthrough default: - err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key) + err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey()) logger.Error(ctx, err) return p, false, err } @@ -138,7 +138,7 @@ func createAWSSidecarContainer(cfg config.AWSSecretManagerConfig, p *corev1.Pod, }, { Name: AWSSecretFilenameEnvVar, - Value: filepath.Join(string(filepath.Separator), strings.ToLower(secret.Group), strings.ToLower(secret.Key)), + Value: filepath.Join(string(filepath.Separator), strings.ToLower(secret.GetGroup()), strings.ToLower(secret.GetKey())), }, }, Resources: cfg.Resources, diff --git a/flytepropeller/pkg/webhook/aws_secret_manager_test.go b/flytepropeller/pkg/webhook/aws_secret_manager_test.go index d2a74de80b..cb0c9ddba4 100644 --- a/flytepropeller/pkg/webhook/aws_secret_manager_test.go +++ b/flytepropeller/pkg/webhook/aws_secret_manager_test.go @@ -44,11 +44,11 @@ func TestAWSSecretManagerInjector_Inject(t *testing.T) { Env: []corev1.EnvVar{ { Name: "SECRET_ARN", - Value: inputSecret.Group + ":" + inputSecret.Key, + Value: inputSecret.GetGroup() + ":" + inputSecret.GetKey(), }, { Name: "SECRET_FILENAME", - Value: "/" + inputSecret.Group + "/" + inputSecret.Key, + Value: "/" + inputSecret.GetGroup() + "/" + inputSecret.GetKey(), }, { Name: "FLYTE_SECRETS_DEFAULT_DIR", diff --git a/flytepropeller/pkg/webhook/gcp_secret_manager.go b/flytepropeller/pkg/webhook/gcp_secret_manager.go index c69705594e..4db4a0d3ab 100644 --- a/flytepropeller/pkg/webhook/gcp_secret_manager.go +++ b/flytepropeller/pkg/webhook/gcp_secret_manager.go @@ -41,12 +41,12 @@ func formatGCPSecretAccessCommand(secret *core.Secret) []string { // `gcloud` writes this file with permission 0600. // This will cause permission issues in the main container when using non-root // users, so we fix the file permissions with `chmod`. 
- secretDir := strings.ToLower(filepath.Join(GCPSecretMountPath, secret.Group)) - secretPath := strings.ToLower(filepath.Join(secretDir, secret.GroupVersion)) + secretDir := strings.ToLower(filepath.Join(GCPSecretMountPath, secret.GetGroup())) + secretPath := strings.ToLower(filepath.Join(secretDir, secret.GetGroupVersion())) args := fmt.Sprintf( "gcloud secrets versions access %[1]s/versions/%[2]s --out-file=%[4]s || gcloud secrets versions access %[2]s --secret=%[1]s --out-file=%[4]s; chmod +rX %[3]s %[4]s", - secret.Group, - secret.GroupVersion, + secret.GetGroup(), + secret.GetGroupVersion(), secretDir, secretPath, ) @@ -62,12 +62,12 @@ func (i GCPSecretManagerInjector) Type() config.SecretManagerType { } func (i GCPSecretManagerInjector) Inject(ctx context.Context, secret *core.Secret, p *corev1.Pod) (newP *corev1.Pod, injected bool, err error) { - if len(secret.Group) == 0 || len(secret.GroupVersion) == 0 { + if len(secret.GetGroup()) == 0 || len(secret.GetGroupVersion()) == 0 { return nil, false, fmt.Errorf("GCP Secrets Webhook require both group and group version to be set. "+ "Secret: [%v]", secret) } - switch secret.MountRequirement { + switch secret.GetMountRequirement() { case core.Secret_ANY: fallthrough case core.Secret_FILE: @@ -115,7 +115,7 @@ func (i GCPSecretManagerInjector) Inject(ctx context.Context, secret *core.Secre case core.Secret_ENV_VAR: fallthrough default: - err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key) + err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey()) logger.Error(ctx, err) return p, false, err } diff --git a/flytepropeller/pkg/webhook/global_secrets.go b/flytepropeller/pkg/webhook/global_secrets.go index a4b3543fb1..21432f6ccc 100644 --- a/flytepropeller/pkg/webhook/global_secrets.go +++ b/flytepropeller/pkg/webhook/global_secrets.go @@ -35,20 +35,20 @@ func (g GlobalSecrets) Inject(ctx context.Context, secret *coreIdl.Secret, p *co return p, false, err } - switch secret.MountRequirement { + switch secret.GetMountRequirement() { case coreIdl.Secret_FILE: return nil, false, fmt.Errorf("global secrets can only be injected as environment "+ - "variables [%v/%v]", secret.Group, secret.Key) + "variables [%v/%v]", secret.GetGroup(), secret.GetKey()) case coreIdl.Secret_ANY: fallthrough case coreIdl.Secret_ENV_VAR: - if len(secret.Group) == 0 { + if len(secret.GetGroup()) == 0 { return nil, false, fmt.Errorf("mounting a secret to env var requires selecting the "+ - "secret and a single key within. Key [%v]", secret.Key) + "secret and a single key within. 
Key [%v]", secret.GetKey()) } envVar := corev1.EnvVar{ - Name: strings.ToUpper(K8sDefaultEnvVarPrefix + secret.Group + EnvVarGroupKeySeparator + secret.Key), + Name: strings.ToUpper(K8sDefaultEnvVarPrefix + secret.GetGroup() + EnvVarGroupKeySeparator + secret.GetKey()), Value: v, } @@ -63,7 +63,7 @@ func (g GlobalSecrets) Inject(ctx context.Context, secret *coreIdl.Secret, p *co p.Spec.InitContainers = AppendEnvVars(p.Spec.InitContainers, envVar) p.Spec.Containers = AppendEnvVars(p.Spec.Containers, envVar) default: - err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key) + err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey()) logger.Error(ctx, err) return p, false, err } diff --git a/flytepropeller/pkg/webhook/k8s_secrets.go b/flytepropeller/pkg/webhook/k8s_secrets.go index 102d1ae6c1..68bb8669d2 100644 --- a/flytepropeller/pkg/webhook/k8s_secrets.go +++ b/flytepropeller/pkg/webhook/k8s_secrets.go @@ -38,12 +38,12 @@ func (i K8sSecretInjector) Type() config.SecretManagerType { } func (i K8sSecretInjector) Inject(ctx context.Context, secret *core.Secret, p *corev1.Pod) (newP *corev1.Pod, injected bool, err error) { - if len(secret.Group) == 0 || len(secret.Key) == 0 { + if len(secret.GetGroup()) == 0 || len(secret.GetKey()) == 0 { return nil, false, fmt.Errorf("k8s Secrets Webhook require both key and group to be set. "+ "Secret: [%v]", secret) } - switch secret.MountRequirement { + switch secret.GetMountRequirement() { case core.Secret_ANY: fallthrough case core.Secret_FILE: @@ -88,7 +88,7 @@ func (i K8sSecretInjector) Inject(ctx context.Context, secret *core.Secret, p *c p.Spec.InitContainers = AppendEnvVars(p.Spec.InitContainers, prefixEnvVar) p.Spec.Containers = AppendEnvVars(p.Spec.Containers, prefixEnvVar) default: - err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key) + err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey()) logger.Error(ctx, err) return p, false, err } diff --git a/flytepropeller/pkg/webhook/utils.go b/flytepropeller/pkg/webhook/utils.go index 92a4995c24..9d40cbbe6f 100644 --- a/flytepropeller/pkg/webhook/utils.go +++ b/flytepropeller/pkg/webhook/utils.go @@ -26,13 +26,13 @@ func hasEnvVar(envVars []corev1.EnvVar, envVarKey string) bool { func CreateEnvVarForSecret(secret *core.Secret) corev1.EnvVar { optional := true return corev1.EnvVar{ - Name: strings.ToUpper(K8sDefaultEnvVarPrefix + secret.Group + EnvVarGroupKeySeparator + secret.Key), + Name: strings.ToUpper(K8sDefaultEnvVarPrefix + secret.GetGroup() + EnvVarGroupKeySeparator + secret.GetKey()), ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ - Name: secret.Group, + Name: secret.GetGroup(), }, - Key: secret.Key, + Key: secret.GetKey(), Optional: &optional, }, }, @@ -43,14 +43,14 @@ func CreateVolumeForSecret(secret *core.Secret) corev1.Volume { optional := true return corev1.Volume{ // we don't want to create different volume for the same secret group - Name: encoding.Base32Encoder.EncodeToString([]byte(secret.Group + EnvVarGroupKeySeparator + secret.GroupVersion)), + Name: encoding.Base32Encoder.EncodeToString([]byte(secret.GetGroup() + EnvVarGroupKeySeparator + secret.GetGroupVersion())), VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - 
SecretName: secret.Group, + SecretName: secret.GetGroup(), Items: []corev1.KeyToPath{ { - Key: secret.Key, - Path: strings.ToLower(secret.Key), + Key: secret.GetKey(), + Path: strings.ToLower(secret.GetKey()), }, }, Optional: &optional, @@ -63,7 +63,7 @@ func CreateVolumeMountForSecret(volumeName string, secret *core.Secret) corev1.V return corev1.VolumeMount{ Name: volumeName, ReadOnly: true, - MountPath: filepath.Join(filepath.Join(K8sSecretPathPrefix...), strings.ToLower(secret.Group)), + MountPath: filepath.Join(filepath.Join(K8sSecretPathPrefix...), strings.ToLower(secret.GetGroup())), } } @@ -130,15 +130,15 @@ func CreateVaultAnnotationsForSecret(secret *core.Secret, kvversion config.KVVer id := string(uuid.NewUUID()) secretVaultAnnotations := map[string]string{ - fmt.Sprintf("vault.hashicorp.com/agent-inject-secret-%s", id): secret.Group, - fmt.Sprintf("vault.hashicorp.com/agent-inject-file-%s", id): fmt.Sprintf("%s/%s", secret.Group, secret.Key), + fmt.Sprintf("vault.hashicorp.com/agent-inject-secret-%s", id): secret.GetGroup(), + fmt.Sprintf("vault.hashicorp.com/agent-inject-file-%s", id): fmt.Sprintf("%s/%s", secret.GetGroup(), secret.GetKey()), } // Set the consul template language query depending on the KV Secrets Engine version. // Version 1 stores plain k:v pairs under .Data, version 2 supports versioned secrets // and wraps the k:v pairs into an additional subfield. var query string - switch secret.GroupVersion { + switch secret.GetGroupVersion() { case "kv1": query = ".Data" case "kv2": @@ -157,7 +157,7 @@ func CreateVaultAnnotationsForSecret(secret *core.Secret, kvversion config.KVVer } } if query != "" { - template := fmt.Sprintf(`{{- with secret "%s" -}}{{ %s.%s }}{{- end -}}`, secret.Group, query, secret.Key) + template := fmt.Sprintf(`{{- with secret "%s" -}}{{ %s.%s }}{{- end -}}`, secret.GetGroup(), query, secret.GetKey()) secretVaultAnnotations[fmt.Sprintf("vault.hashicorp.com/agent-inject-template-%s", id)] = template } diff --git a/flytepropeller/pkg/webhook/vault_secret_manager.go b/flytepropeller/pkg/webhook/vault_secret_manager.go index 658e3970d1..e5430153d6 100644 --- a/flytepropeller/pkg/webhook/vault_secret_manager.go +++ b/flytepropeller/pkg/webhook/vault_secret_manager.go @@ -35,12 +35,12 @@ func (i VaultSecretManagerInjector) Type() config.SecretManagerType { } func (i VaultSecretManagerInjector) Inject(ctx context.Context, secret *coreIdl.Secret, p *corev1.Pod) (newP *corev1.Pod, injected bool, err error) { - if len(secret.Group) == 0 || len(secret.Key) == 0 { + if len(secret.GetGroup()) == 0 || len(secret.GetKey()) == 0 { return nil, false, fmt.Errorf("Vault Secrets Webhook requires both key and group to be set. "+ "Secret: [%v]", secret) } - switch secret.MountRequirement { + switch secret.GetMountRequirement() { case coreIdl.Secret_ANY: fallthrough case coreIdl.Secret_FILE: @@ -76,7 +76,7 @@ func (i VaultSecretManagerInjector) Inject(ctx context.Context, secret *coreIdl. 
case coreIdl.Secret_ENV_VAR: return p, false, fmt.Errorf("Env_Var is not a supported mount requirement for Vault Secret Manager") default: - err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key) + err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey()) logger.Error(ctx, err) return p, false, err } diff --git a/flytestdlib/.golangci.yml b/flytestdlib/.golangci.yml index e3bff2320b..4ae605454b 100644 --- a/flytestdlib/.golangci.yml +++ b/flytestdlib/.golangci.yml @@ -1,34 +1,24 @@ -# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. -# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: -# -# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst - run: skip-dirs: - pkg/client - linters: disable-all: true enable: - - deadcode - errcheck - - gas + - gosec - gci - goconst - goimports - - golint - gosimple - govet - ineffassign - misspell - nakedret - staticcheck - - structcheck - typecheck - unconvert - unused - - varcheck - + - protogetter linters-settings: gci: custom-order: true @@ -37,3 +27,5 @@ linters-settings: - default - prefix(github.com/flyteorg) skip-generated: true + goconst: + ignore-tests: true diff --git a/flytestdlib/bitarray/bitset.go b/flytestdlib/bitarray/bitset.go index 883b9ded65..be957fecb3 100644 --- a/flytestdlib/bitarray/bitset.go +++ b/flytestdlib/bitarray/bitset.go @@ -14,6 +14,7 @@ type BitSet []Block // Ensures that the given bit is set in the BitSet. func (s *BitSet) Set(i uint) { + // #nosec G115 if len(*s) < int(i/blockSize+1) { *s = append(*s, make([]Block, i/blockSize+1)...) } @@ -23,6 +24,7 @@ func (s *BitSet) Set(i uint) { // Ensures that the given bit is cleared (unset) in the BitSet. func (s *BitSet) Clear(i uint) { + // #nosec G115 if len(*s) >= int(i/blockSize+1) { (*s)[i/blockSize] &^= 1 << (i % blockSize) } @@ -30,6 +32,7 @@ func (s *BitSet) Clear(i uint) { // Returns true if the given bit is set, false if it is cleared. func (s *BitSet) IsSet(i uint) bool { + // #nosec G115 if len(*s) < int(i/blockSize+1) { return false } @@ -44,7 +47,8 @@ func (s *BitSet) BlockCount() int { // Returns the length of the BitSet. func (s *BitSet) Cap() uint { - return uint(s.BlockCount()) * blockSize + return uint(s.BlockCount()) * blockSize // #nosec G115 + } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
diff --git a/flytestdlib/bitarray/bitset_test.go b/flytestdlib/bitarray/bitset_test.go index 72e91f70d0..60572fbe3c 100644 --- a/flytestdlib/bitarray/bitset_test.go +++ b/flytestdlib/bitarray/bitset_test.go @@ -58,11 +58,11 @@ func TestNewBitSet(t *testing.T) { func TestBitSet_Cap(t *testing.T) { t.Run("Cap == size", func(t *testing.T) { b := NewBitSet(blockSize * 5) - assert.Equal(t, int(blockSize*5), int(b.Cap())) + assert.Equal(t, int(blockSize*5), int(b.Cap())) // #nosec G115 }) t.Run("Cap > size", func(t *testing.T) { b := NewBitSet(blockSize*2 + 20) - assert.Equal(t, int(blockSize*3), int(b.Cap())) + assert.Equal(t, int(blockSize*3), int(b.Cap())) // #nosec G115 }) } diff --git a/flytestdlib/bitarray/compact_array.go b/flytestdlib/bitarray/compact_array.go index 827c8c8532..f2d562a786 100644 --- a/flytestdlib/bitarray/compact_array.go +++ b/flytestdlib/bitarray/compact_array.go @@ -39,13 +39,16 @@ func (a *CompactArray) validateValue(value Item) { func (a *CompactArray) SetItem(index int, value Item) { a.validateIndex(index) a.validateValue(value) - bitIndex := uint(index) * a.ItemSize + bitIndex := uint(index) * a.ItemSize // #nosec G115 x := Item(1) + // #nosec G115 for i := int(a.ItemSize - 1); i >= 0; i-- { if x&value != 0 { - a.BitSet.Set(bitIndex + uint(i)) + a.BitSet.Set(bitIndex + uint(i)) // #nosec G115 + } else { - a.BitSet.Clear(bitIndex + uint(i)) + a.BitSet.Clear(bitIndex + uint(i)) // #nosec G115 + } x <<= 1 @@ -55,10 +58,12 @@ func (a *CompactArray) SetItem(index int, value Item) { // Gets Item at provided index. func (a *CompactArray) GetItem(index int) Item { a.validateIndex(index) - bitIndex := uint(index) * a.ItemSize + bitIndex := uint(index) * a.ItemSize // #nosec G115 res := Item(0) x := Item(1) + // #nosec G115 for i := int(a.ItemSize - 1); i >= 0; i-- { + // #nosec G115 if a.BitSet.IsSet(bitIndex + uint(i)) { res |= x } @@ -72,8 +77,9 @@ func (a *CompactArray) GetItem(index int) Item { // Gets all items stored in the array. The size of the returned array matches the ItemsCount it was initialized with. func (a CompactArray) GetItems() []Item { res := make([]Item, 0, a.ItemsCount) + // #nosec G115 for i := 0; i < int(a.ItemsCount); i++ { - res = append(res, a.GetItem(i)) + res = append(res, a.GetItem(i)) // #nosec G115 } return res diff --git a/flytestdlib/bitarray/compact_array_test.go b/flytestdlib/bitarray/compact_array_test.go index 7d41ee7b41..8d69f839d9 100644 --- a/flytestdlib/bitarray/compact_array_test.go +++ b/flytestdlib/bitarray/compact_array_test.go @@ -32,6 +32,7 @@ func TestNewItemArray(t *testing.T) { arr, err := NewCompactArray(itemsCount, Item(1)<<(itemSize-1)) assert.NoError(t, err) + // #nosec G115 for i := 0; i < int(itemsCount); i++ { // Ensure inserted items is in the accepted range (0 -> 1<.pb` to prevent the race condition. Furthermore, we propose that these error files get grouped in an `errors/` folder under the raw output prefix. + +#### Providing relevant error information to the backend an UI + +The pod entrypoint needs to provide the information in which worker the error occurred in order to display the name in the UI. For the strategy `"earliest"`, it needs to also provide the timestamp when the error occurred. + +We therefore propose to add optional attributes `worker` and `timestamp` (unix epoch time with micro- or nanoseconds granularity) to flyteidl's [`message ContainerError`](https://github.com/flyteorg/flyte/blob/30d33149159c90d0de44f6351b8d5d7309242e59/flyteidl/protos/flyteidl/core/errors.proto#L11). 
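+
+To make this concrete, below is a minimal, self-contained Go sketch of the information a single worker would record under this proposal. The `containerError` struct is only an illustration that mirrors the proposed `worker` and `timestamp` additions; it is not the generated flyteidl Go API, and the per-worker file path shown in the comments (`errors/error-<worker>.pb`) is a hypothetical rendering of the naming scheme described above.
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+// containerError mirrors the *proposed* additions to flyteidl's ContainerError:
+// besides the existing code and message, it carries the worker that produced
+// the error and the time at which the error occurred. Field names are
+// illustrative only, not the generated protobuf Go API.
+type containerError struct {
+	Code      string
+	Message   string
+	Worker    string    // proposed: name of the worker pod that errored
+	Timestamp time.Time // proposed: when the error occurred
+}
+
+func main() {
+	// Per the proposal, the plugin injects the worker name via an env var.
+	worker := os.Getenv("FLYTE_INTERNAL_WORKER_NAME")
+
+	e := containerError{
+		Code:      "USER",
+		Message:   "ChildFailedError: rank 3 raised RuntimeError",
+		Worker:    worker,
+		Timestamp: time.Now(),
+	}
+
+	// Each worker uploads its own file so concurrent uploads no longer race,
+	// e.g. <raw output prefix>/errors/error-<worker>.pb (hypothetical naming).
+	fmt.Printf("would upload %+v to errors/error-%s.pb\n", e, worker)
+}
+```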
+
+
+Furthermore, we propose to add an optional `timestamp` attribute to all [flytekit exceptions](https://github.com/flyteorg/flytekit/tree/master/flytekit/exceptions).
+
+The flytekit pytorch elastic plugin, for instance, catches `ChildFailedError`s [here](https://github.com/flyteorg/flytekit/blob/77d056ab9fda40ec6b2312a4d197b9107cdb70dc/plugins/flytekit-kf-pytorch/flytekitplugins/kfpytorch/task.py#L449); it would extract the timestamp and re-raise the error as a Flyte exception which contains the timestamp. (Other plugins, e.g. non-elastic pytorch, which don't come with built-in exception types that include error timestamps, can themselves record the timestamp when the `task_function` raises an exception.)
+
+The entrypoint `pyflyte-execute` will transfer the timestamp from the flytekit exception into the protobuf `ContainerError`. It will also set the `worker` attribute of the `ContainerError` according to the `FLYTE_INTERNAL_WORKER_NAME` environment variable introduced above.
+
+### Flytepropeller/Flyteplugins - Aggregate the errors in the backend
+
+In the [kubernetes plugin machinery](https://github.com/flyteorg/flyte/blob/815f85d0ce90a3ace61cce17c0bfb441ac2dbcc3/flyteplugins/go/tasks/pluginmachinery/k8s/plugin.go) we propose to define the error aggregation strategy and allow plugins to configure it via their `PluginProperties`:
+
+```go
+type ErrorAggregationStrategy int
+
+const (
+	// Single error file from a single container
+	Default ErrorAggregationStrategy = iota
+
+	// Earliest error from potentially multiple error files
+	Earliest
+)
+
+// System level properties that this Plugin supports
+type PluginProperties struct {
+	...
+	ErrorAggregationStrategy ErrorAggregationStrategy
+}
+```
+
+Currently, [here](https://github.com/flyteorg/flyte/blob/4514860cf56ba62717f6c207f269410a8c1a5461/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go#L290) in the plugin manager, upon completion of a node execution, a new [`RemoteFileOutputReader`](https://github.com/flyteorg/flyte/blob/d6da838627d57cd27d60beea004e974ce1fb3ca5/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go#L14) is constructed, which is responsible for reading the error file uploaded to blob storage. This `RemoteFileOutputReader` implements the [`OutputReader` interface](https://github.com/flyteorg/flyte/blob/1e54d21c4d4ee74245f799a57b4bb8a5534e8368/flyteplugins/go/tasks/pluginmachinery/io/iface.go#L32).
+
+We propose to implement a new `MultiErrorFileRemoteFileOutputReader` which (for future flexibility) can be configured with the different strategies we define. Initially, the only available strategy will be `"earliest"`, which the RFC authors aim to use for the kubeflow pytorch plugin. This output reader will search for all error files in the `errors/` folder under the raw output prefix and aggregate the errors as specified by the strategy.
+
+If [the plugin manager](https://github.com/flyteorg/flyte/blob/4514860cf56ba62717f6c207f269410a8c1a5461/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go#L290) finds that the respective plugin configures an error aggregation strategy other than `Default`, we instantiate a `MultiErrorFileRemoteFileOutputReader` (instead of the existing `RemoteFileOutputReader`) and configure it with the respective strategy.
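+
+As a rough sketch of how the `Earliest` strategy could aggregate the per-worker errors (simplified stand-in types, not the actual flyteplugins implementation; the real reader would deserialize `ContainerError` protobufs from blob storage), the core of the aggregation is a scan for the record with the smallest timestamp:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// workerError is a simplified stand-in for the ContainerError deserialized
+// from each per-worker error file (the errors/error-<worker>.pb naming here
+// is hypothetical).
+type workerError struct {
+	Worker    string
+	Message   string
+	Timestamp time.Time
+}
+
+// earliest picks the error that occurred first; under the Earliest strategy
+// this is the record the proposed MultiErrorFileRemoteFileOutputReader would
+// use to determine retryability and report to flyteadmin.
+func earliest(errs []workerError) (workerError, bool) {
+	if len(errs) == 0 {
+		return workerError{}, false
+	}
+	root := errs[0]
+	for _, e := range errs[1:] {
+		if e.Timestamp.Before(root.Timestamp) {
+			root = e
+		}
+	}
+	return root, true
+}
+
+func main() {
+	now := time.Now()
+	errs := []workerError{
+		{Worker: "worker-2", Message: "rendezvous shut down", Timestamp: now},
+		{Worker: "worker-0", Message: "CUDA out of memory", Timestamp: now.Add(-3 * time.Second)},
+	}
+	if root, ok := earliest(errs); ok {
+		fmt.Printf("root cause on %s: %s\n", root.Worker, root.Message)
+	}
+}
+```
+
+If no per-worker error files exist, the reader would fall back to the single `error.pb`, as discussed in the backwards-compatibility section below.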
+
+For the strategy `Earliest`, the reader will determine the `ContainerError` with the earliest timestamp, use it to determine retryability, and communicate this specific error message to flyteadmin (and finally the UI).
+
+#### Backwards compatibility
+We propose that the new `MultiErrorFileRemoteFileOutputReader` falls back to reading the single `error.pb` (the behaviour of the default `RemoteFileOutputReader`) if no `error-.pb` files are found, in order to remain backwards compatible:
+
+* If flytekit uses a version that supports multiple error files but the backend does not yet, `pyflyte-execute` will not upload multiple error files for distributed tasks since the `FLYTE_INTERNAL_DIST_ERROR_STRATEGY` environment variable will not be set.
+* If flytekit uses an older version that does not support multiple error files while the backend does, a single error file will be uploaded despite `FLYTE_INTERNAL_DIST_ERROR_STRATEGY` being set. The output reader will, however, fall back to reading the single `error.pb`.
+
+
+### Displaying the name of the worker which experienced the root-cause error in the UI
+
+We propose that in the UI, in addition to the root-cause error message, for distributed tasks we display the name of the worker pod which experienced the root-cause error. For a user trying to debug a failure, this makes it possible to quickly identify the logs of the relevant pod out of potentially hundreds of pods.
+
+To communicate the name of the worker which experienced the root-cause error from flytepropeller to flyteadmin and eventually the UI, we propose to also add the (optional) attribute `worker` to the [`core.ExecutionError` protobuf message](https://github.com/flyteorg/flyte/blob/815f85d0ce90a3ace61cce17c0bfb441ac2dbcc3/flyteidl/protos/flyteidl/core/execution.proto#L61).
+
+In `ReadError` of the new `MultiErrorFileRemoteFileOutputReader`, we will then transfer the name of the worker pod which experienced the root-cause error from the `ContainerError` in the `ErrorDocument` to the `core.ExecutionError` (as is already done today in the [`RemoteFileOutputReader` for the error message](https://github.com/flyteorg/flyte/blob/815f85d0ce90a3ace61cce17c0bfb441ac2dbcc3/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go#L65)).
+
+With these changes, flyteadmin's `/api/v1/executions///` endpoint, which today provides the error message to the UI, will then also provide the information about which worker experienced the root-cause error. `flyteconsole` needs to be modified to show this information.
+
+## 4 Metrics & Dashboards
+
+-
+
+## 5 Drawbacks
+
+We don't see any drawbacks to making the error handling of distributed training tasks deterministic and making it easier for users to identify which pod in a distributed job failed first.
+
+## 6 Alternatives
+
+A poor man's version would be to not overwrite the error file if it already exists. While this is a worse solution than the one proposed above, as there still is a race condition, it would still be better than the current behavior because at least we would *favor* earlier errors over later ones.
+
+## 7 Potential Impact and Dependencies
+
+The authors of this RFC have experience with pytorch (elastic and non-elastic) distributed training jobs and will implement the proposed changes for the pytorch plugin. The improvement proposed in this RFC might be relevant for community members using e.g. the distributed tensorflow or mpi plugins.
If possible, they should be included in the RFC and implementation process so that all distributed task plugins can benefit from the improved error handling. + +## 8 Unresolved questions + +- + +## 9 Conclusion + +With ML models getting bigger and bigger, distributed training jobs become increasingly important to the Flyte community. Removing the race condition outlined above from Flyte's error handling for such jobs will significantly improve the UX because we will be able to determine recoverability and report the root-cause error in the Flyte UI in a deterministic way. diff --git a/script/prepare_artifacts.sh b/script/prepare_artifacts.sh old mode 100644 new mode 100755 index 32cf1e769a..c794493f2e --- a/script/prepare_artifacts.sh +++ b/script/prepare_artifacts.sh @@ -29,6 +29,9 @@ sed "s/v0.1.10/${VERSION}/g" ./charts/flyte-binary/README.md > temp.txt && mv te grep -rlZ "version:[^P]*# VERSION" ./charts/flyteagent/Chart.yaml | xargs -0 sed -i "s/version:[^P]*# VERSION/version: ${VERSION} # VERSION/g" sed "s/v0.1.10/${VERSION}/g" ./charts/flyteagent/README.md > temp.txt && mv temp.txt ./charts/flyteagent/README.md +grep -rlZ "version:[^P]*# VERSION" ./charts/flyte-binary/Chart.yaml | xargs -0 sed -i "s/version:[^P]*# VERSION/version: ${VERSION} # VERSION/g" +sed "s/v0.1.10/${VERSION}/g" ./charts/flyte-binary/README.md > temp.txt && mv temp.txt ./charts/flyte-binary/README.md + helm dep update ./charts/flyte helm dep update ./charts/flyte-core helm dep update ./charts/flyte-deps @@ -67,5 +70,8 @@ sed -i "s,tag:[^P]*# FLYTE_TAG,tag: ${VERSION} # FLYTE_TAG," ./charts/flyte-bina sed -i "s,repository:[^P]*# FLYTECOPILOT_IMAGE,repository: cr.flyte.org/flyteorg/flytecopilot-release # FLYTECOPILOT_IMAGE," ./charts/flyte-binary/values.yaml sed -i "s,tag:[^P]*# FLYTECOPILOT_TAG,tag: ${VERSION} # FLYTECOPILOT_TAG," ./charts/flyte-binary/values.yaml +sed -i "s,tag:[^P]*# FLYTEBINARY_TAG,tag: ${VERSION} # FLYTEBINARY_TAG," ./charts/flyte-binary/values.yaml +sed -i "s,repository:[^P]*# FLYTEBINARY_IMAGE,repository: cr.flyte.org/flyteorg/flyte-binary-release # FLYTEBINARY_IMAGE," ./charts/flyte-binary/values.yaml + sed -i "s,tag:[^P]*# FLYTEAGENT_TAG,tag: ${VERSION} # FLYTEAGENT_TAG," ./charts/flyteagent/values.yaml sed -i "s,repository:[^P]*# FLYTEAGENT_IMAGE,repository: cr.flyte.org/flyteorg/flyteagent-release # FLYTEAGENT_IMAGE," ./charts/flyteagent/values.yaml diff --git a/script/release.sh b/script/release.sh index 9816fb6a7c..beae0203b7 100755 --- a/script/release.sh +++ b/script/release.sh @@ -25,4 +25,6 @@ sed -i "s,image:[^P]*# FLYTECOPILOT_IMAGE,image: cr.flyte.org/flyteorg/flytecopi sed -i "s,image:[^P]*# FLYTECOPILOT_IMAGE,image: cr.flyte.org/flyteorg/flytecopilot:${VERSION} # FLYTECOPILOT_IMAGE," ./charts/flyte-core/values.yaml sed -i "s,tag:[^P]*# FLYTECOPILOT_TAG,tag: ${VERSION} # FLYTECOPILOT_TAG," ./charts/flyte-binary/values.yaml +sed -i "s,tag:[^P]*# FLYTEBINARY_TAG,tag: ${VERSION} # FLYTEBINARY_TAG," ./charts/flyte-binary/values.yaml + sed -i "s,tag:[^P]*# FLYTEAGENT_TAG,tag: ${FLYTEKIT_TAG} # FLYTEAGENT_TAG," ./charts/flyteagent/values.yaml