diff --git a/boilerplate/flyte/golang_support_tools/go.mod b/boilerplate/flyte/golang_support_tools/go.mod
index c7676dd907..469b42d079 100644
--- a/boilerplate/flyte/golang_support_tools/go.mod
+++ b/boilerplate/flyte/golang_support_tools/go.mod
@@ -1,12 +1,12 @@
module github.com/flyteorg/boilerplate
-go 1.22
+go 1.22.1
require (
github.com/EngHabu/mockery v0.0.0-20220916190332-dde70e38baba
github.com/alvaroloes/enumer v1.1.2
github.com/flyteorg/flyte/flytestdlib v1.11.0
- github.com/golangci/golangci-lint v1.53.3
+ github.com/golangci/golangci-lint v1.61.0
github.com/pseudomuto/protoc-gen-doc v1.4.1
github.com/vektra/mockery/v2 v2.40.3
)
@@ -14,191 +14,198 @@ require (
require (
4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
4d63.com/gochecknoglobals v0.2.1 // indirect
- cloud.google.com/go v0.112.0 // indirect
- cloud.google.com/go/compute v1.23.3 // indirect
- cloud.google.com/go/compute/metadata v0.2.3 // indirect
- cloud.google.com/go/iam v1.1.5 // indirect
- cloud.google.com/go/storage v1.36.0 // indirect
- github.com/4meepo/tagalign v1.2.2 // indirect
- github.com/Abirdcfly/dupword v0.0.11 // indirect
- github.com/Antonboom/errname v0.1.10 // indirect
- github.com/Antonboom/nilnil v0.1.5 // indirect
+ cloud.google.com/go v0.115.1 // indirect
+ cloud.google.com/go/auth v0.9.3 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
+ cloud.google.com/go/compute/metadata v0.5.0 // indirect
+ cloud.google.com/go/iam v1.2.0 // indirect
+ cloud.google.com/go/storage v1.43.0 // indirect
+ github.com/4meepo/tagalign v1.3.4 // indirect
+ github.com/Abirdcfly/dupword v0.1.1 // indirect
+ github.com/Antonboom/errname v0.1.13 // indirect
+ github.com/Antonboom/nilnil v0.1.9 // indirect
+ github.com/Antonboom/testifylint v1.4.3 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
- github.com/BurntSushi/toml v1.3.2 // indirect
+ github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
+ github.com/Crocmagnon/fatcontext v0.5.2 // indirect
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
- github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect
+ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
+ github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/sprig v2.15.0+incompatible // indirect
- github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect
- github.com/alexkohler/nakedret/v2 v2.0.2 // indirect
+ github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
+ github.com/alecthomas/go-check-sumtype v0.1.4 // indirect
+ github.com/alexkohler/nakedret/v2 v2.0.4 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect
github.com/alingse/asasalint v0.0.11 // indirect
github.com/aokoli/goutils v1.0.1 // indirect
- github.com/ashanbrown/forbidigo v1.5.3 // indirect
+ github.com/ashanbrown/forbidigo v1.6.0 // indirect
github.com/ashanbrown/makezero v1.1.1 // indirect
github.com/aws/aws-sdk-go v1.44.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bkielbasa/cyclop v1.2.1 // indirect
github.com/blizzy78/varnamelen v0.8.0 // indirect
- github.com/bombsimon/wsl/v3 v3.4.0 // indirect
- github.com/breml/bidichk v0.2.4 // indirect
- github.com/breml/errchkjson v0.3.1 // indirect
- github.com/butuzov/ireturn v0.2.0 // indirect
- github.com/butuzov/mirror v1.1.0 // indirect
+ github.com/bombsimon/wsl/v4 v4.4.1 // indirect
+ github.com/breml/bidichk v0.2.7 // indirect
+ github.com/breml/errchkjson v0.3.6 // indirect
+ github.com/butuzov/ireturn v0.3.0 // indirect
+ github.com/butuzov/mirror v1.2.0 // indirect
+ github.com/catenacyber/perfsprint v0.7.1 // indirect
+ github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charithe/durationcheck v0.0.10 // indirect
- github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect
+ github.com/chavacava/garif v0.1.0 // indirect
github.com/chigopher/pathlib v0.19.1 // indirect
+ github.com/ckaznocha/intrange v0.2.0 // indirect
github.com/coocood/freecache v1.1.1 // indirect
github.com/curioswitch/go-reassign v0.2.0 // indirect
- github.com/daixiang0/gci v0.10.1 // indirect
+ github.com/daixiang0/gci v0.13.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/denis-tingaikin/go-header v0.4.3 // indirect
+ github.com/denis-tingaikin/go-header v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607 // indirect
- github.com/esimonov/ifshort v1.0.4 // indirect
- github.com/ettle/strcase v0.1.1 // indirect
+ github.com/ettle/strcase v0.2.0 // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
- github.com/fatih/color v1.15.0 // indirect
+ github.com/fatih/color v1.17.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/firefart/nonamedreturns v1.0.4 // indirect
+ github.com/firefart/nonamedreturns v1.0.5 // indirect
github.com/flyteorg/stow v0.3.10 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
- github.com/go-critic/go-critic v0.8.1 // indirect
- github.com/go-logr/logr v1.3.0 // indirect
+ github.com/ghostiam/protogetter v0.3.6 // indirect
+ github.com/go-critic/go-critic v0.11.4 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-toolsmith/astcast v1.1.0 // indirect
github.com/go-toolsmith/astcopy v1.1.0 // indirect
- github.com/go-toolsmith/astequal v1.1.0 // indirect
+ github.com/go-toolsmith/astequal v1.2.0 // indirect
github.com/go-toolsmith/astfmt v1.1.0 // indirect
github.com/go-toolsmith/astp v1.1.0 // indirect
github.com/go-toolsmith/strparse v1.1.0 // indirect
github.com/go-toolsmith/typep v1.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
- github.com/gofrs/flock v0.8.1 // indirect
+ github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
- github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
- github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
- github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
- github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
- github.com/golangci/misspell v0.4.0 // indirect
- github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
- github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
+ github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect
+ github.com/golangci/misspell v0.6.0 // indirect
+ github.com/golangci/modinfo v0.3.4 // indirect
+ github.com/golangci/plugin-module-register v0.1.1 // indirect
+ github.com/golangci/revgrep v0.5.3 // indirect
+ github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/s2a-go v0.1.7 // indirect
+ github.com/google/s2a-go v0.1.8 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.12.0 // indirect
- github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect
+ github.com/googleapis/gax-go/v2 v2.13.0 // indirect
+ github.com/gordonklaus/ineffassign v0.1.0 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.4.2 // indirect
github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
- github.com/hashicorp/errwrap v1.1.0 // indirect
- github.com/hashicorp/go-multierror v1.1.1 // indirect
- github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/iancoleman/strcase v0.3.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/jgautheron/goconst v1.5.1 // indirect
+ github.com/jgautheron/goconst v1.7.1 // indirect
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
github.com/jinzhu/copier v0.3.5 // indirect
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
+ github.com/jjti/go-spancheck v0.6.2 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/julz/importas v0.1.0 // indirect
- github.com/kisielk/errcheck v1.6.3 // indirect
- github.com/kisielk/gotool v1.0.0 // indirect
- github.com/kkHAIKE/contextcheck v1.1.4 // indirect
+ github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect
+ github.com/kisielk/errcheck v1.7.0 // indirect
+ github.com/kkHAIKE/contextcheck v1.1.5 // indirect
github.com/kulti/thelper v0.6.3 // indirect
- github.com/kunwardeep/paralleltest v1.0.7 // indirect
+ github.com/kunwardeep/paralleltest v1.0.10 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/kyoh86/exportloopref v0.1.11 // indirect
- github.com/ldez/gomoddirectives v0.2.3 // indirect
+ github.com/lasiar/canonicalheader v1.1.1 // indirect
+ github.com/ldez/gomoddirectives v0.2.4 // indirect
github.com/ldez/tagliatelle v0.5.0 // indirect
- github.com/leonklingele/grouper v1.1.1 // indirect
+ github.com/leonklingele/grouper v1.1.2 // indirect
github.com/lufeee/execinquery v1.2.1 // indirect
+ github.com/macabu/inamedparam v0.1.3 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/maratori/testableexamples v1.0.0 // indirect
github.com/maratori/testpackage v1.1.1 // indirect
github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
- github.com/mbilski/exhaustivestruct v1.2.0 // indirect
- github.com/mgechev/revive v1.3.2 // indirect
+ github.com/mgechev/revive v1.3.9 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/moricho/tparallel v0.3.1 // indirect
+ github.com/moricho/tparallel v0.3.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007 // indirect
github.com/nakabonne/nestif v0.3.1 // indirect
- github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
github.com/ncw/swift v1.0.53 // indirect
- github.com/nishanths/exhaustive v0.11.0 // indirect
+ github.com/nishanths/exhaustive v0.12.0 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
- github.com/nunnatsa/ginkgolinter v0.12.1 // indirect
+ github.com/nunnatsa/ginkgolinter v0.16.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 // indirect
- github.com/pelletier/go-toml/v2 v2.0.6 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/polyfloyd/go-errorlint v1.4.2 // indirect
+ github.com/polyfloyd/go-errorlint v1.6.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/pseudomuto/protokit v0.2.0 // indirect
- github.com/quasilyte/go-ruleguard v0.3.19 // indirect
+ github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect
+ github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
github.com/rs/zerolog v1.29.0 // indirect
- github.com/ryancurrah/gomodguard v1.3.0 // indirect
- github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect
+ github.com/ryancurrah/gomodguard v1.3.5 // indirect
+ github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
+ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
- github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect
- github.com/securego/gosec/v2 v2.16.0 // indirect
+ github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect
+ github.com/securego/gosec/v2 v2.21.2 // indirect
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sivchari/containedctx v1.0.3 // indirect
- github.com/sivchari/nosnakecase v1.7.0 // indirect
- github.com/sivchari/tenv v1.7.1 // indirect
+ github.com/sivchari/tenv v1.10.0 // indirect
github.com/sonatard/noctx v0.0.2 // indirect
github.com/sourcegraph/go-diff v0.7.0 // indirect
- github.com/spf13/afero v1.10.0 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
- github.com/spf13/cobra v1.7.0 // indirect
+ github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.15.0 // indirect
@@ -207,67 +214,65 @@ require (
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
- github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect
github.com/tdakkota/asciicheck v0.2.0 // indirect
- github.com/tetafro/godot v1.4.11 // indirect
+ github.com/tetafro/godot v1.4.17 // indirect
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
github.com/timonwong/loggercheck v0.9.4 // indirect
- github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect
+ github.com/tomarrell/wrapcheck/v2 v2.9.0 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
- github.com/ultraware/funlen v0.0.3 // indirect
- github.com/ultraware/whitespace v0.0.5 // indirect
- github.com/uudashr/gocognit v1.0.6 // indirect
- github.com/xen0n/gosmopolitan v1.2.1 // indirect
+ github.com/ultraware/funlen v0.1.0 // indirect
+ github.com/ultraware/whitespace v0.1.1 // indirect
+ github.com/uudashr/gocognit v1.1.3 // indirect
+ github.com/xen0n/gosmopolitan v1.2.2 // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
- github.com/yeya24/promlinter v0.2.0 // indirect
- github.com/ykadowak/zerologlint v0.1.2 // indirect
- gitlab.com/bosi/decorder v0.2.3 // indirect
+ github.com/yeya24/promlinter v0.3.0 // indirect
+ github.com/ykadowak/zerologlint v0.1.5 // indirect
+ gitlab.com/bosi/decorder v0.4.2 // indirect
+ go-simpler.org/musttag v0.12.2 // indirect
+ go-simpler.org/sloglint v0.7.2 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
- go.opentelemetry.io/otel v1.21.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+ go.opentelemetry.io/otel v1.29.0 // indirect
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 // indirect
- go.opentelemetry.io/otel/metric v1.21.0 // indirect
- go.opentelemetry.io/otel/sdk v1.21.0 // indirect
- go.opentelemetry.io/otel/trace v1.21.0 // indirect
- go.tmz.dev/musttag v0.7.0 // indirect
+ go.opentelemetry.io/otel/metric v1.29.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.28.0 // indirect
+ go.opentelemetry.io/otel/trace v1.29.0 // indirect
+ go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.25.0 // indirect
- golang.org/x/crypto v0.25.0 // indirect
- golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect
- golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect
- golang.org/x/mod v0.17.0 // indirect
- golang.org/x/net v0.27.0 // indirect
- golang.org/x/oauth2 v0.16.0 // indirect
- golang.org/x/sync v0.7.0 // indirect
- golang.org/x/sys v0.22.0 // indirect
- golang.org/x/term v0.22.0 // indirect
- golang.org/x/text v0.16.0 // indirect
- golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
- google.golang.org/api v0.155.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
- google.golang.org/grpc v1.62.1 // indirect
- google.golang.org/protobuf v1.33.0 // indirect
+ golang.org/x/crypto v0.27.0 // indirect
+ golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect
+ golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect
+ golang.org/x/mod v0.21.0 // indirect
+ golang.org/x/net v0.28.0 // indirect
+ golang.org/x/oauth2 v0.22.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
+ golang.org/x/sys v0.25.0 // indirect
+ golang.org/x/term v0.24.0 // indirect
+ golang.org/x/text v0.18.0 // indirect
+ golang.org/x/time v0.6.0 // indirect
+ golang.org/x/tools v0.24.0 // indirect
+ google.golang.org/api v0.196.0 // indirect
+ google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/grpc v1.66.0 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- honnef.co/go/tools v0.4.3 // indirect
+ honnef.co/go/tools v0.5.1 // indirect
k8s.io/api v0.28.2 // indirect
k8s.io/apimachinery v0.28.2 // indirect
k8s.io/client-go v0.28.1 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
- mvdan.cc/gofumpt v0.5.0 // indirect
- mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
- mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
- mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect
+ mvdan.cc/gofumpt v0.7.0 // indirect
+ mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect
sigs.k8s.io/controller-runtime v0.0.0-00010101000000-000000000000 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
diff --git a/boilerplate/flyte/golang_support_tools/go.sum b/boilerplate/flyte/golang_support_tools/go.sum
index 6aa6b8d969..fc017b6f44 100644
--- a/boilerplate/flyte/golang_support_tools/go.sum
+++ b/boilerplate/flyte/golang_support_tools/go.sum
@@ -3,61 +3,30 @@
4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc=
4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
-cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
-cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
-cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
-cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/4meepo/tagalign v1.2.2 h1:kQeUTkFTaBRtd/7jm8OKJl9iHk0gAO+TDFPHGSna0aw=
-github.com/4meepo/tagalign v1.2.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
-github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU=
-github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA=
-github.com/Antonboom/errname v0.1.10 h1:RZ7cYo/GuZqjr1nuJLNe8ZH+a+Jd9DaZzttWzak9Bls=
-github.com/Antonboom/errname v0.1.10/go.mod h1:xLeiCIrvVNpUtsN0wxAh05bNIZpqE22/qDMnTBTttiA=
-github.com/Antonboom/nilnil v0.1.5 h1:X2JAdEVcbPaOom2TUa1FxZ3uyuUlex0XMLGYMemu6l0=
-github.com/Antonboom/nilnil v0.1.5/go.mod h1:I24toVuBKhfP5teihGWctrRiPbRKHwZIFOvc6v3HZXk=
+cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ=
+cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
+cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
+cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
+cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
+cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8=
+cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q=
+cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI=
+cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts=
+cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
+cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
+github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8=
+github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0=
+github.com/Abirdcfly/dupword v0.1.1 h1:Bsxe0fIw6OwBtXMIncaTxCLHYO5BB+3mcsR5E8VXloY=
+github.com/Abirdcfly/dupword v0.1.1/go.mod h1:B49AcJdTYYkpd4HjgAcutNGG9HZ2JWwKunH9Y2BA6sM=
+github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM=
+github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns=
+github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ=
+github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ=
+github.com/Antonboom/testifylint v1.4.3 h1:ohMt6AHuHgttaQ1xb6SSnxCeK4/rnK7KKzbvs7DmEck=
+github.com/Antonboom/testifylint v1.4.3/go.mod h1:+8Q9+AOLsz5ZiQiiYujJKs9mNz398+M6UgslP4qgJLA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
@@ -71,31 +40,35 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfB
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
-github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
+github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/Crocmagnon/fatcontext v0.5.2 h1:vhSEg8Gqng8awhPju2w7MKHqMlg4/NI+gSDHtR3xgwA=
+github.com/Crocmagnon/fatcontext v0.5.2/go.mod h1:87XhRMaInHP44Q7Tlc7jkgKKB7kZAOPiDkFMdKCC+74=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/EngHabu/mockery v0.0.0-20220916190332-dde70e38baba h1:HDBbUo0odjuCCtStDS//vNd3CeP1GdjQVhFmSZLnFwU=
github.com/EngHabu/mockery v0.0.0-20220916190332-dde70e38baba/go.mod h1:DjqxgJ6VUERvvVE41d4Rrn72K29MXwk9ziY18bi36BU=
-github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts=
-github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig v2.15.0+incompatible h1:0gSxPGWS9PAr7U2NsQ2YQg6juRDINkUyuvbb4b2Xm8w=
github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY=
-github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE=
-github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ=
+github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA=
+github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ=
+github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk=
+github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
+github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c=
+github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
+github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
+github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
+github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg=
+github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU=
github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw=
@@ -104,98 +77,89 @@ github.com/alvaroloes/enumer v1.1.2 h1:5khqHB33TZy1GWCO/lZwcroBFh7u+0j40T83VUbfA
github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo=
github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg=
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
-github.com/ashanbrown/forbidigo v1.5.3 h1:jfg+fkm/snMx+V9FBwsl1d340BV/99kZGv5jN9hBoXk=
-github.com/ashanbrown/forbidigo v1.5.3/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
+github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY=
+github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/aws/aws-sdk-go v1.44.2 h1:5VBk5r06bgxgRKVaUtm1/4NT/rtrnH2E4cnAYv5zgQc=
github.com/aws/aws-sdk-go v1.44.2/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY=
github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM=
github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
-github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU=
-github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
-github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8=
-github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
-github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ=
-github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U=
-github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4=
-github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
-github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI=
-github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE=
+github.com/bombsimon/wsl/v4 v4.4.1 h1:jfUaCkN+aUpobrMO24zwyAMwMAV5eSziCkOKEauOLdw=
+github.com/bombsimon/wsl/v4 v4.4.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo=
+github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY=
+github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ=
+github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA=
+github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U=
+github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0=
+github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA=
+github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs=
+github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ=
+github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc=
+github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
+github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg=
+github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
-github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0=
-github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo=
+github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
+github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/chigopher/pathlib v0.19.1 h1:RoLlUJc0CqBGwq239cilyhxPNLXTK+HXoASGyGznx5A=
github.com/chigopher/pathlib v0.19.1/go.mod h1:tzC1dZLW8o33UQpWkNkhvPwL5n4yyFRFm/jL1YGWFvY=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/ckaznocha/intrange v0.2.0 h1:FykcZuJ8BD7oX93YbO1UY9oZtkRbp+1/kJcDjkefYLs=
+github.com/ckaznocha/intrange v0.2.0/go.mod h1:r5I7nUlAAG56xmkOpw4XVr16BXhwYTUdcuRFeevn1oE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc=
github.com/coocood/freecache v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY=
github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
-github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0=
-github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
+github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c=
+github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk=
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=
-github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
+github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8=
+github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607 h1:cTavhURetDkezJCvxFggiyLeP40Mrk/TtVg2+ycw1Es=
github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607/go.mod h1:Cg4fM0vhYWOZdgM7RIOSTRNIc8/VT7CXClC3Ni86lu4=
-github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA=
-github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
-github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
-github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
+github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
+github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
+github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
-github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
+github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA=
+github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw=
github.com/flyteorg/flyte/flytestdlib v1.11.0 h1:DxM/sf6H0ong8LIjgh0YwXK+abnGV8kWVi6EgfVCkO8=
github.com/flyteorg/flyte/flytestdlib v1.11.0/go.mod h1:AmgNCq/tGEDwVfilW1nFtgPQn8vQ9gcDu6SNwz1YY+M=
github.com/flyteorg/protoc-gen-doc v1.4.2 h1:Otw0F+RHaPQ8XlpzhLLgjsCMcrAIcMO01Zh+ALe3rrE=
@@ -210,21 +174,14 @@ github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-critic/go-critic v0.8.1 h1:16omCF1gN3gTzt4j4J6fKI/HnRojhEp+Eks6EuKw3vw=
-github.com/go-critic/go-critic v0.8.1/go.mod h1:kpzXl09SIJX1cr9TB/g/sAG+eFEl7ZS9f9cqvZtyNl0=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk=
+github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw=
+github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU=
+github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
@@ -235,16 +192,19 @@ github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2Kv
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8=
github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s=
github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw=
github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
-github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw=
github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ=
+github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw=
+github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY=
github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco=
github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4=
github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA=
@@ -256,37 +216,27 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi
github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus=
github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
+github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
+github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
-github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
+github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -295,45 +245,35 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=
-github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
-github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
-github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
-github.com/golangci/golangci-lint v1.53.3 h1:CUcRafczT4t1F+mvdkUm6KuOpxUZTl0yWN/rSU6sSMo=
-github.com/golangci/golangci-lint v1.53.3/go.mod h1:W4Gg3ONq6p3Jl+0s/h9Gr0j7yEgHJWWZO2bHl2tBUXM=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0=
-github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
-github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ=
-github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME=
+github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE=
+github.com/golangci/golangci-lint v1.61.0 h1:VvbOLaRVWmyxCnUIMTbf1kDsaJbTzH20FAMXTAlQGu8=
+github.com/golangci/golangci-lint v1.61.0/go.mod h1:e4lztIrJJgLPhWvFPDkhiMwEFRrWlmFbrZea3FsJyN8=
+github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs=
+github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo=
+github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA=
+github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM=
+github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c=
+github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc=
+github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs=
+github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k=
+github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs=
+github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
@@ -341,40 +281,22 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
-github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
-github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8=
-github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.3 h1:QRje2j5GZimBzlbhGA2V2QlGNgL8G6e+wGo/+/2bWI0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.3/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
+github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
+github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
+github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s=
+github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
@@ -387,16 +309,9 @@ github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW
github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
@@ -406,52 +321,40 @@ github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
-github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk=
+github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
+github.com/jjti/go-spancheck v0.6.2 h1:iYtoxqPMzHUPp7St+5yA8+cONdyXD3ug6KK15n7Pklk=
+github.com/jjti/go-spancheck v0.6.2/go.mod h1:+X7lvIrR5ZdUTkxFYqzJ0abr8Sb5LOo80uOhWNqIrYA=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos=
+github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8=
-github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
-github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0=
+github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8=
-github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg=
+github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -461,20 +364,24 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
-github.com/kunwardeep/paralleltest v1.0.7 h1:2uCk94js0+nVNQoHZNLBkAR1DQJrVzw6T0RMzJn55dQ=
-github.com/kunwardeep/paralleltest v1.0.7/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
+github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs=
+github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
-github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=
-github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
+github.com/lasiar/canonicalheader v1.1.1 h1:wC+dY9ZfiqiPwAexUApFush/csSPXeIi4QqyxXmng8I=
+github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0=
+github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg=
+github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g=
github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo=
github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
-github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU=
-github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
+github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY=
+github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA=
github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
+github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk=
+github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -492,17 +399,14 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
-github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
-github.com/mgechev/revive v1.3.2 h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U=
-github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0=
+github.com/mgechev/revive v1.3.9 h1:18Y3R4a2USSBF+QZKFQwVkBROUda7uoBlkEuBD+YD1A=
+github.com/mgechev/revive v1.3.9/go.mod h1:+uxEIr5UH0TjXWHTno3xh4u7eg6jDpXKzQccA9UGhHU=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -510,140 +414,116 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA=
-github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI=
+github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI=
+github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007 h1:28i1IjGcx8AofiB4N3q5Yls55VEaitzuEPkFJEVgGkA=
github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
-github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
-github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks=
github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0=
-github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4=
+github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg=
+github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs=
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
-github.com/nunnatsa/ginkgolinter v0.12.1 h1:vwOqb5Nu05OikTXqhvLdHCGcx5uthIYIl0t79UVrERQ=
-github.com/nunnatsa/ginkgolinter v0.12.1/go.mod h1:AK8Ab1PypVrcGUusuKD8RDcl2KgsIwvNaaxAlyHSzso=
+github.com/nunnatsa/ginkgolinter v0.16.2 h1:8iLqHIZvN4fTLDC0Ke9tbSZVcyVHoBs0HIbnVSxfHJk=
+github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
-github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
-github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
-github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
-github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
+github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
+github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
+github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
+github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
+github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
+github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 h1:/I3lTljEEDNYLho3/FUB7iD/oc2cEFgVmbHzV+O0PtU=
github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ=
-github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
-github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v1.4.2 h1:CU+O4181IxFDdPH6t/HT7IiDj1I7zxNi1RIUxYwn8d0=
-github.com/polyfloyd/go-errorlint v1.4.2/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/polyfloyd/go-errorlint v1.6.0 h1:tftWV9DE7txiFzPpztTAwyoRLKNj9gpVm2cg8/OwcYY=
+github.com/polyfloyd/go-errorlint v1.6.0/go.mod h1:HR7u8wuP1kb1NeN1zqTd1ZMlqUKPPHF+Id4vIPvDqVw=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/pseudomuto/protokit v0.2.0 h1:hlnBDcy3YEDXH7kc9gV+NLaN0cDzhDvD1s7Y6FZ8RpM=
github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
-github.com/quasilyte/go-ruleguard v0.3.19 h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc=
-github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw=
+github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo=
+github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU=
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w=
github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw=
-github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
-github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI=
-github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
+github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU=
+github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE=
+github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
+github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
-github.com/sashamelentyev/usestdlibvars v1.23.0 h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0=
-github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU=
-github.com/securego/gosec/v2 v2.16.0 h1:Pi0JKoasQQ3NnoRao/ww/N/XdynIB9NRYYZT5CyOs5U=
-github.com/securego/gosec/v2 v2.16.0/go.mod h1:xvLcVZqUfo4aAQu56TNv7/Ltz6emAOQAEsrZrt7uGlI=
+github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI=
+github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8=
+github.com/securego/gosec/v2 v2.21.2 h1:deZp5zmYf3TWwU7A7cR2+SolbTpZ3HQiwFqnzQyEl3M=
+github.com/securego/gosec/v2 v2.21.2/go.mod h1:au33kg78rNseF5PwPnTWhuYBFf534bvJRvOrgZ/bFzU=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE=
github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4=
-github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8=
-github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
-github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak=
-github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
+github.com/sivchari/tenv v1.10.0 h1:g/hzMA+dBCKqGXgW8AV/1xIWhAvDrx0zFKNR48NFMg0=
+github.com/sivchari/tenv v1.10.0/go.mod h1:tdY24masnVoZFxYrHv/nD6Tc8FbkEtAQEEziXpyMgqY=
github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00=
github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo=
github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
-github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
-github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
-github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -655,13 +535,11 @@ github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRk
github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -670,45 +548,43 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
-github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8=
-github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM=
github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
-github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=
-github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
+github.com/tetafro/godot v1.4.17 h1:pGzu+Ye7ZUEFx7LHU0dAKmCOXWsPjl7qA6iMGndsjPs=
+github.com/tetafro/godot v1.4.17/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4=
github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg=
-github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ=
-github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
+github.com/tomarrell/wrapcheck/v2 v2.9.0 h1:801U2YCAjLhdN8zhZ/7tdjB3EnAoRlJHt/s+9hijLQ4=
+github.com/tomarrell/wrapcheck/v2 v2.9.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo=
github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
-github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
-github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
-github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
-github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
+github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
+github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
+github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ=
+github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8=
+github.com/uudashr/gocognit v1.1.3 h1:l+a111VcDbKfynh+airAy/DJQKaXh2m9vkoysMPSZyM=
+github.com/uudashr/gocognit v1.1.3/go.mod h1:aKH8/e8xbTRBwjbCkwZ8qt4l2EpKXl31KMHgSS+lZ2U=
github.com/vektra/mockery/v2 v2.40.3 h1:IZ2lydSDFsY0khnEsbSu13VLcqSsa6UYSS/8F+uOJmo=
github.com/vektra/mockery/v2 v2.40.3/go.mod h1:KYBZF/7sqOa86BaOZPYsoCZWEWLS90a5oBLg2pVudxY=
-github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw=
-github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA=
+github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU=
+github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg=
github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
-github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
-github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
-github.com/ykadowak/zerologlint v0.1.2 h1:Um4P5RMmelfjQqQJKtE8ZW+dLZrXrENeIzWWKw800U4=
-github.com/ykadowak/zerologlint v0.1.2/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
+github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs=
+github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4=
+github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw=
+github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -716,36 +592,34 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0=
-gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
-go-simpler.org/assert v0.5.0 h1:+5L/lajuQtzmbtEfh69sr5cRf2/xZzyJhFjoOz/PPqs=
-go-simpler.org/assert v0.5.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo=
+gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8=
+go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ=
+go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28=
+go-simpler.org/musttag v0.12.2 h1:J7lRc2ysXOq7eM8rwaTYnNrHd5JwjppzB6mScysB2Cs=
+go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM=
+go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY=
+go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
-go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
-go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
+go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
+go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 h1:Nw7Dv4lwvGrI68+wULbcq7su9K2cebeCUrDjVrUJHxM=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0/go.mod h1:1MsF6Y7gTqosgoZvHlzcaaM8DIMNZgJh87ykokoNH7Y=
-go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
-go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
-go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
-go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
-go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
-go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
-go.tmz.dev/musttag v0.7.0 h1:QfytzjTWGXZmChoX0L++7uQN+yRCPfyFm+whsM+lfGc=
-go.tmz.dev/musttag v0.7.0/go.mod h1:oTFPvgOkJmp5kYL02S8+jrH0eLrBIl57rzWeA26zDEM=
+go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
+go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
+go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
+go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
+go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -753,56 +627,25 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4=
-golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk=
+golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 h1:J74nGeMgeFnYQJN59eFwh06jX/V8g0lB7LWpjSLxtgU=
-golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
+golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
@@ -811,135 +654,63 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
-golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -948,93 +719,46 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
-golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
+golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524210228-3d17549cdc6b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
@@ -1042,113 +766,40 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA=
-google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
+google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg=
+google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
-google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
+google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
+google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
+google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
+google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1157,43 +808,27 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw=
-honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA=
+honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I=
+honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw=
k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg=
k8s.io/apiextensions-apiserver v0.28.0 h1:CszgmBL8CizEnj4sj7/PtLGey6Na3YgWyGCPONv7E9E=
@@ -1210,17 +845,10 @@ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5Ohx
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=
-mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
-mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w=
-mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU=
+mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo=
+mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U=
+mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ=
sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU=
sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
diff --git a/boilerplate/flyte/golang_test_targets/Makefile b/boilerplate/flyte/golang_test_targets/Makefile
index c02409a318..6492014917 100644
--- a/boilerplate/flyte/golang_test_targets/Makefile
+++ b/boilerplate/flyte/golang_test_targets/Makefile
@@ -15,7 +15,7 @@ generate: download_tooling #generate go code
.PHONY: lint
lint: download_tooling #lints the package for common code smells
- GL_DEBUG=linters_output,env golangci-lint run $(LINT_FLAGS) --deadline=5m --exclude deprecated -v
+ GL_DEBUG=linters_output,env golangci-lint run $(LINT_FLAGS) --timeout=5m --exclude deprecated -v
.PHONY: lint-fix
lint-fix: LINT_FLAGS=--fix
diff --git a/datacatalog/.golangci.yml b/datacatalog/.golangci.yml
index 6d13f4a3b6..77107079d0 100644
--- a/datacatalog/.golangci.yml
+++ b/datacatalog/.golangci.yml
@@ -1,35 +1,25 @@
-# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES.
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY:
-#
-# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst
-
run:
skip-dirs:
- pkg/client
-
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- - gas
+ - gosec
- gci
- goconst
- goimports
- - golint
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- staticcheck
- - structcheck
- typecheck
- unconvert
- unparam
- unused
- - varcheck
-
+ - protogetter
linters-settings:
gci:
custom-order: true
@@ -38,6 +28,8 @@ linters-settings:
- default
- prefix(github.com/flyteorg)
skip-generated: true
+ goconst:
+ ignore-tests: true
issues:
exclude:
- copylocks
diff --git a/datacatalog/pkg/manager/impl/artifact_data_store.go b/datacatalog/pkg/manager/impl/artifact_data_store.go
index 5cbd3cc3e0..fa4a14c903 100644
--- a/datacatalog/pkg/manager/impl/artifact_data_store.go
+++ b/datacatalog/pkg/manager/impl/artifact_data_store.go
@@ -27,8 +27,8 @@ type artifactDataStore struct {
}
func (m *artifactDataStore) getDataLocation(ctx context.Context, artifact *datacatalog.Artifact, data *datacatalog.ArtifactData) (storage.DataReference, error) {
- dataset := artifact.Dataset
- return m.store.ConstructReference(ctx, m.storagePrefix, dataset.Project, dataset.Domain, dataset.Name, dataset.Version, artifact.Id, data.Name, artifactDataFile)
+ dataset := artifact.GetDataset()
+ return m.store.ConstructReference(ctx, m.storagePrefix, dataset.GetProject(), dataset.GetDomain(), dataset.GetName(), dataset.GetVersion(), artifact.GetId(), data.GetName(), artifactDataFile)
}
// Store marshalled data in data.pb under the storage prefix
@@ -37,7 +37,7 @@ func (m *artifactDataStore) PutData(ctx context.Context, artifact *datacatalog.A
if err != nil {
return "", errors.NewDataCatalogErrorf(codes.Internal, "Unable to generate data location %s, err %v", dataLocation.String(), err)
}
- err = m.store.WriteProtobuf(ctx, dataLocation, storage.Options{}, data.Value)
+ err = m.store.WriteProtobuf(ctx, dataLocation, storage.Options{}, data.GetValue())
if err != nil {
return "", errors.NewDataCatalogErrorf(codes.Internal, "Unable to store artifact data in location %s, err %v", dataLocation.String(), err)
}
diff --git a/datacatalog/pkg/manager/impl/artifact_manager.go b/datacatalog/pkg/manager/impl/artifact_manager.go
index 40f3f40538..f32cb3f31f 100644
--- a/datacatalog/pkg/manager/impl/artifact_manager.go
+++ b/datacatalog/pkg/manager/impl/artifact_manager.go
@@ -58,7 +58,7 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal
timer := m.systemMetrics.createResponseTime.Start(ctx)
defer timer.Stop()
- artifact := request.Artifact
+ artifact := request.GetArtifact()
err := validators.ValidateArtifact(artifact)
if err != nil {
logger.Warningf(ctx, "Invalid create artifact request %v, err: %v", request, err)
@@ -66,8 +66,8 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, artifact.Dataset.Project, artifact.Dataset.Domain)
- datasetKey := transformers.FromDatasetID(artifact.Dataset)
+ ctx = contextutils.WithProjectDomain(ctx, artifact.GetDataset().GetProject(), artifact.GetDataset().GetDomain())
+ datasetKey := transformers.FromDatasetID(artifact.GetDataset())
// The dataset must exist for the artifact, let's verify that first
dataset, err := m.repo.DatasetRepo().Get(ctx, datasetKey)
@@ -80,16 +80,16 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal
// TODO: when adding a tag, need to verify one tag per partition combo
// check that the artifact's partitions are the same partition values of the dataset
datasetPartitionKeys := transformers.FromPartitionKeyModel(dataset.PartitionKeys)
- err = validators.ValidatePartitions(datasetPartitionKeys, artifact.Partitions)
+ err = validators.ValidatePartitions(datasetPartitionKeys, artifact.GetPartitions())
if err != nil {
- logger.Warnf(ctx, "Invalid artifact partitions %v, err: %+v", artifact.Partitions, err)
+ logger.Warnf(ctx, "Invalid artifact partitions %v, err: %+v", artifact.GetPartitions(), err)
m.systemMetrics.createFailureCounter.Inc(ctx)
return nil, err
}
// create Artifact Data offloaded storage files
- artifactDataModels := make([]models.ArtifactData, len(request.Artifact.Data))
- for i, artifactData := range request.Artifact.Data {
+ artifactDataModels := make([]models.ArtifactData, len(request.GetArtifact().GetData()))
+ for i, artifactData := range request.GetArtifact().GetData() {
dataLocation, err := m.artifactStore.PutData(ctx, artifact, artifactData)
if err != nil {
logger.Errorf(ctx, "Failed to store artifact data err: %v", err)
@@ -97,12 +97,12 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal
return nil, err
}
- artifactDataModels[i].Name = artifactData.Name
+ artifactDataModels[i].Name = artifactData.GetName()
artifactDataModels[i].Location = dataLocation.String()
m.systemMetrics.createDataSuccessCounter.Inc(ctx)
}
- logger.Debugf(ctx, "Stored %v data for artifact %+v", len(artifactDataModels), artifact.Id)
+ logger.Debugf(ctx, "Stored %v data for artifact %+v", len(artifactDataModels), artifact.GetId())
artifactModel, err := transformers.CreateArtifactModel(request, artifactDataModels, dataset)
if err != nil {
@@ -114,7 +114,7 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal
err = m.repo.ArtifactRepo().Create(ctx, artifactModel)
if err != nil {
if errors.IsAlreadyExistsError(err) {
- logger.Warnf(ctx, "Artifact already exists key: %+v, err %v", artifact.Id, err)
+ logger.Warnf(ctx, "Artifact already exists key: %+v, err %v", artifact.GetId(), err)
m.systemMetrics.alreadyExistsCounter.Inc(ctx)
} else {
logger.Errorf(ctx, "Failed to create artifact %v, err: %v", artifactDataModels, err)
@@ -123,7 +123,7 @@ func (m *artifactManager) CreateArtifact(ctx context.Context, request *datacatal
return nil, err
}
- logger.Debugf(ctx, "Successfully created artifact id: %v", artifact.Id)
+ logger.Debugf(ctx, "Successfully created artifact id: %v", artifact.GetId())
m.systemMetrics.createSuccessCounter.Inc(ctx)
return &datacatalog.CreateArtifactResponse{}, nil
@@ -141,7 +141,7 @@ func (m *artifactManager) GetArtifact(ctx context.Context, request *datacatalog.
return nil, err
}
- datasetID := request.Dataset
+ datasetID := request.GetDataset()
artifactModel, err := m.findArtifact(ctx, datasetID, request)
if err != nil {
@@ -164,7 +164,7 @@ func (m *artifactManager) GetArtifact(ctx context.Context, request *datacatalog.
}
artifact.Data = artifactDataList
- logger.Debugf(ctx, "Retrieved artifact dataset %v, id: %v", artifact.Dataset, artifact.Id)
+ logger.Debugf(ctx, "Retrieved artifact dataset %v, id: %v", artifact.GetDataset(), artifact.GetId())
m.systemMetrics.getSuccessCounter.Inc(ctx)
return &datacatalog.GetArtifactResponse{
Artifact: artifact,
@@ -249,7 +249,7 @@ func (m *artifactManager) ListArtifacts(ctx context.Context, request *datacatalo
}
// Verify the dataset exists before listing artifacts
- datasetKey := transformers.FromDatasetID(request.Dataset)
+ datasetKey := transformers.FromDatasetID(request.GetDataset())
dataset, err := m.repo.DatasetRepo().Get(ctx, datasetKey)
if err != nil {
logger.Warnf(ctx, "Failed to get dataset for listing artifacts %v, err: %v", datasetKey, err)
@@ -265,7 +265,7 @@ func (m *artifactManager) ListArtifacts(ctx context.Context, request *datacatalo
return nil, err
}
- err = transformers.ApplyPagination(request.Pagination, &listInput)
+ err = transformers.ApplyPagination(request.GetPagination(), &listInput)
if err != nil {
logger.Warningf(ctx, "Invalid pagination options in list artifact request %v, err: %v", request, err)
m.systemMetrics.validationErrorCounter.Inc(ctx)
@@ -311,7 +311,7 @@ func (m *artifactManager) ListArtifacts(ctx context.Context, request *datacatalo
// stored data will be overwritten in the underlying blob storage, no longer existing data (based on ArtifactData name)
// will be deleted.
func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatalog.UpdateArtifactRequest) (*datacatalog.UpdateArtifactResponse, error) {
- ctx = contextutils.WithProjectDomain(ctx, request.Dataset.Project, request.Dataset.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetDataset().GetProject(), request.GetDataset().GetDomain())
timer := m.systemMetrics.updateResponseTime.Start(ctx)
defer timer.Stop()
@@ -333,9 +333,9 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal
}
// artifactModel needs to be updated with new SerializedMetadata
- serializedMetadata, err := transformers.SerializedMetadata(request.Metadata)
+ serializedMetadata, err := transformers.SerializedMetadata(request.GetMetadata())
if err != nil {
- logger.Errorf(ctx, "Error in transforming Metadata from request %+v, err %v", request.Metadata, err)
+ logger.Errorf(ctx, "Error in transforming Metadata from request %+v, err %v", request.GetMetadata(), err)
m.systemMetrics.transformerErrorCounter.Inc(ctx)
m.systemMetrics.updateFailureCounter.Inc(ctx)
return nil, err
@@ -353,9 +353,9 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal
// overwrite existing artifact data and upload new entries, building a map of artifact data names to remove
// deleted entries from the blob storage after the upload completed
artifactDataNames := make(map[string]struct{})
- artifactDataModels := make([]models.ArtifactData, len(request.Data))
- for i, artifactData := range request.Data {
- artifactDataNames[artifactData.Name] = struct{}{}
+ artifactDataModels := make([]models.ArtifactData, len(request.GetData()))
+ for i, artifactData := range request.GetData() {
+ artifactDataNames[artifactData.GetName()] = struct{}{}
dataLocation, err := m.artifactStore.PutData(ctx, artifact, artifactData)
if err != nil {
@@ -365,7 +365,7 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal
return nil, err
}
- artifactDataModels[i].Name = artifactData.Name
+ artifactDataModels[i].Name = artifactData.GetName()
artifactDataModels[i].Location = dataLocation.String()
m.systemMetrics.updateDataSuccessCounter.Inc(ctx)
}
@@ -384,7 +384,7 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal
err = m.repo.ArtifactRepo().Update(ctx, artifactModel)
if err != nil {
if errors.IsDoesNotExistError(err) {
- logger.Warnf(ctx, "Artifact does not exist key: %+v, err %v", artifact.Id, err)
+ logger.Warnf(ctx, "Artifact does not exist key: %+v, err %v", artifact.GetId(), err)
m.systemMetrics.doesNotExistCounter.Inc(ctx)
} else {
logger.Errorf(ctx, "Failed to update artifact %v, err: %v", artifactModel, err)
@@ -408,11 +408,11 @@ func (m *artifactManager) UpdateArtifact(ctx context.Context, request *datacatal
m.systemMetrics.deleteDataSuccessCounter.Inc(ctx)
}
- logger.Debugf(ctx, "Successfully updated artifact id: %v", artifact.Id)
+ logger.Debugf(ctx, "Successfully updated artifact id: %v", artifact.GetId())
m.systemMetrics.updateSuccessCounter.Inc(ctx)
return &datacatalog.UpdateArtifactResponse{
- ArtifactId: artifact.Id,
+ ArtifactId: artifact.GetId(),
}, nil
}
diff --git a/datacatalog/pkg/manager/impl/artifact_manager_test.go b/datacatalog/pkg/manager/impl/artifact_manager_test.go
index 2bf39b04d9..0df125f1ec 100644
--- a/datacatalog/pkg/manager/impl/artifact_manager_test.go
+++ b/datacatalog/pkg/manager/impl/artifact_manager_test.go
@@ -107,50 +107,50 @@ func newMockDataCatalogRepo() *mocks.DataCatalogRepo {
}
func getExpectedDatastoreLocation(ctx context.Context, store *storage.DataStore, prefix storage.DataReference, artifact *datacatalog.Artifact, idx int) (storage.DataReference, error) {
- return getExpectedDatastoreLocationFromName(ctx, store, prefix, artifact, artifact.Data[idx].Name)
+ return getExpectedDatastoreLocationFromName(ctx, store, prefix, artifact, artifact.GetData()[idx].GetName())
}
func getExpectedDatastoreLocationFromName(ctx context.Context, store *storage.DataStore, prefix storage.DataReference, artifact *datacatalog.Artifact, artifactDataName string) (storage.DataReference, error) {
- dataset := artifact.Dataset
- return store.ConstructReference(ctx, prefix, dataset.Project, dataset.Domain, dataset.Name, dataset.Version, artifact.Id, artifactDataName, artifactDataFile)
+ dataset := artifact.GetDataset()
+ return store.ConstructReference(ctx, prefix, dataset.GetProject(), dataset.GetDomain(), dataset.GetName(), dataset.GetVersion(), artifact.GetId(), artifactDataName, artifactDataFile)
}
func getExpectedArtifactModel(ctx context.Context, t *testing.T, datastore *storage.DataStore, artifact *datacatalog.Artifact) models.Artifact {
- expectedDataset := artifact.Dataset
+ expectedDataset := artifact.GetDataset()
- artifactData := make([]models.ArtifactData, len(artifact.Data))
+ artifactData := make([]models.ArtifactData, len(artifact.GetData()))
// Write sample artifact data to the expected location and see if the retrieved data matches
- for i := range artifact.Data {
+ for i := range artifact.GetData() {
testStoragePrefix, err := datastore.ConstructReference(ctx, datastore.GetBaseContainerFQN(ctx), "test")
assert.NoError(t, err)
dataLocation, err := getExpectedDatastoreLocation(ctx, datastore, testStoragePrefix, artifact, i)
assert.NoError(t, err)
- err = datastore.WriteProtobuf(ctx, dataLocation, storage.Options{}, artifact.Data[i].Value)
+ err = datastore.WriteProtobuf(ctx, dataLocation, storage.Options{}, artifact.GetData()[i].GetValue())
assert.NoError(t, err)
- artifactData[i].Name = artifact.Data[i].Name
+ artifactData[i].Name = artifact.GetData()[i].GetName()
artifactData[i].Location = dataLocation.String()
}
// construct the artifact model we will return on the queries
- serializedMetadata, err := proto.Marshal(artifact.Metadata)
+ serializedMetadata, err := proto.Marshal(artifact.GetMetadata())
assert.NoError(t, err)
datasetKey := models.DatasetKey{
- Project: expectedDataset.Project,
- Domain: expectedDataset.Domain,
- Version: expectedDataset.Version,
- Name: expectedDataset.Name,
- UUID: expectedDataset.UUID,
+ Project: expectedDataset.GetProject(),
+ Domain: expectedDataset.GetDomain(),
+ Version: expectedDataset.GetVersion(),
+ Name: expectedDataset.GetName(),
+ UUID: expectedDataset.GetUUID(),
}
return models.Artifact{
ArtifactKey: models.ArtifactKey{
- DatasetProject: expectedDataset.Project,
- DatasetDomain: expectedDataset.Domain,
- DatasetVersion: expectedDataset.Version,
- DatasetName: expectedDataset.Name,
- ArtifactID: artifact.Id,
+ DatasetProject: expectedDataset.GetProject(),
+ DatasetDomain: expectedDataset.GetDomain(),
+ DatasetVersion: expectedDataset.GetVersion(),
+ DatasetName: expectedDataset.GetName(),
+ ArtifactID: artifact.GetId(),
},
- DatasetUUID: expectedDataset.UUID,
+ DatasetUUID: expectedDataset.GetUUID(),
ArtifactData: artifactData,
Dataset: models.Dataset{
DatasetKey: datasetKey,
@@ -162,7 +162,7 @@ func getExpectedArtifactModel(ctx context.Context, t *testing.T, datastore *stor
{Key: "key2", Value: "value2"},
},
Tags: []models.Tag{
- {TagKey: models.TagKey{TagName: "test-tag"}, DatasetUUID: expectedDataset.UUID, ArtifactID: artifact.Id},
+ {TagKey: models.TagKey{TagName: "test-tag"}, DatasetUUID: expectedDataset.GetUUID(), ArtifactID: artifact.GetId()},
},
BaseModel: models.BaseModel{
CreatedAt: getTestTimestamp(),
@@ -180,15 +180,15 @@ func TestCreateArtifact(t *testing.T) {
expectedDataset := getTestDataset()
mockDatasetModel := models.Dataset{
DatasetKey: models.DatasetKey{
- Project: expectedDataset.Id.Project,
- Domain: expectedDataset.Id.Domain,
- Name: expectedDataset.Id.Name,
- Version: expectedDataset.Id.Version,
- UUID: expectedDataset.Id.UUID,
+ Project: expectedDataset.GetId().GetProject(),
+ Domain: expectedDataset.GetId().GetDomain(),
+ Name: expectedDataset.GetId().GetName(),
+ Version: expectedDataset.GetId().GetVersion(),
+ UUID: expectedDataset.GetId().GetUUID(),
},
PartitionKeys: []models.PartitionKey{
- {Name: expectedDataset.PartitionKeys[0]},
- {Name: expectedDataset.PartitionKeys[1]},
+ {Name: expectedDataset.GetPartitionKeys()[0]},
+ {Name: expectedDataset.GetPartitionKeys()[1]},
},
}
@@ -200,30 +200,30 @@ func TestCreateArtifact(t *testing.T) {
dcRepo := newMockDataCatalogRepo()
dcRepo.MockDatasetRepo.On("Get", mock.Anything,
mock.MatchedBy(func(dataset models.DatasetKey) bool {
- return dataset.Project == expectedDataset.Id.Project &&
- dataset.Domain == expectedDataset.Id.Domain &&
- dataset.Name == expectedDataset.Id.Name &&
- dataset.Version == expectedDataset.Id.Version
+ return dataset.Project == expectedDataset.GetId().GetProject() &&
+ dataset.Domain == expectedDataset.GetId().GetDomain() &&
+ dataset.Name == expectedDataset.GetId().GetName() &&
+ dataset.Version == expectedDataset.GetId().GetVersion()
})).Return(mockDatasetModel, nil)
dcRepo.MockArtifactRepo.On("Create",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(artifact models.Artifact) bool {
expectedArtifact := getTestArtifact()
- return artifact.ArtifactID == expectedArtifact.Id &&
+ return artifact.ArtifactID == expectedArtifact.GetId() &&
artifact.SerializedMetadata != nil &&
- len(artifact.ArtifactData) == len(expectedArtifact.Data) &&
- artifact.ArtifactKey.DatasetProject == expectedArtifact.Dataset.Project &&
- artifact.ArtifactKey.DatasetDomain == expectedArtifact.Dataset.Domain &&
- artifact.ArtifactKey.DatasetName == expectedArtifact.Dataset.Name &&
- artifact.ArtifactKey.DatasetVersion == expectedArtifact.Dataset.Version &&
- artifact.DatasetUUID == expectedArtifact.Dataset.UUID &&
- artifact.Partitions[0].Key == expectedArtifact.Partitions[0].Key &&
- artifact.Partitions[0].Value == expectedArtifact.Partitions[0].Value &&
- artifact.Partitions[0].DatasetUUID == expectedDataset.Id.UUID &&
- artifact.Partitions[1].Key == expectedArtifact.Partitions[1].Key &&
- artifact.Partitions[1].Value == expectedArtifact.Partitions[1].Value &&
- artifact.Partitions[1].DatasetUUID == expectedDataset.Id.UUID
+ len(artifact.ArtifactData) == len(expectedArtifact.GetData()) &&
+ artifact.ArtifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() &&
+ artifact.ArtifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() &&
+ artifact.ArtifactKey.DatasetName == expectedArtifact.GetDataset().GetName() &&
+ artifact.ArtifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() &&
+ artifact.DatasetUUID == expectedArtifact.GetDataset().GetUUID() &&
+ artifact.Partitions[0].Key == expectedArtifact.GetPartitions()[0].GetKey() &&
+ artifact.Partitions[0].Value == expectedArtifact.GetPartitions()[0].GetValue() &&
+ artifact.Partitions[0].DatasetUUID == expectedDataset.GetId().GetUUID() &&
+ artifact.Partitions[1].Key == expectedArtifact.GetPartitions()[1].GetKey() &&
+ artifact.Partitions[1].Value == expectedArtifact.GetPartitions()[1].GetValue() &&
+ artifact.Partitions[1].DatasetUUID == expectedDataset.GetId().GetUUID()
})).Return(nil)
request := &datacatalog.CreateArtifactRequest{Artifact: getTestArtifact()}
@@ -238,7 +238,7 @@ func TestCreateArtifact(t *testing.T) {
var value core.Literal
err = datastore.ReadProtobuf(ctx, dataRef, &value)
assert.NoError(t, err)
- assert.True(t, proto.Equal(&value, getTestArtifact().Data[0].Value))
+ assert.True(t, proto.Equal(&value, getTestArtifact().GetData()[0].GetValue()))
})
t.Run("Dataset does not exist", func(t *testing.T) {
@@ -258,7 +258,7 @@ func TestCreateArtifact(t *testing.T) {
request := &datacatalog.CreateArtifactRequest{
Artifact: &datacatalog.Artifact{
// missing artifact id
- Dataset: getTestDataset().Id,
+ Dataset: getTestDataset().GetId(),
},
}
@@ -273,7 +273,7 @@ func TestCreateArtifact(t *testing.T) {
request := &datacatalog.CreateArtifactRequest{
Artifact: &datacatalog.Artifact{
Id: "test",
- Dataset: getTestDataset().Id,
+ Dataset: getTestDataset().GetId(),
// missing artifactData
},
}
@@ -294,13 +294,13 @@ func TestCreateArtifact(t *testing.T) {
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(artifact models.Artifact) bool {
expectedArtifact := getTestArtifact()
- return artifact.ArtifactID == expectedArtifact.Id &&
+ return artifact.ArtifactID == expectedArtifact.GetId() &&
artifact.SerializedMetadata != nil &&
- len(artifact.ArtifactData) == len(expectedArtifact.Data) &&
- artifact.ArtifactKey.DatasetProject == expectedArtifact.Dataset.Project &&
- artifact.ArtifactKey.DatasetDomain == expectedArtifact.Dataset.Domain &&
- artifact.ArtifactKey.DatasetName == expectedArtifact.Dataset.Name &&
- artifact.ArtifactKey.DatasetVersion == expectedArtifact.Dataset.Version
+ len(artifact.ArtifactData) == len(expectedArtifact.GetData()) &&
+ artifact.ArtifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() &&
+ artifact.ArtifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() &&
+ artifact.ArtifactKey.DatasetName == expectedArtifact.GetDataset().GetName() &&
+ artifact.ArtifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion()
})).Return(status.Error(codes.AlreadyExists, "test already exists"))
request := &datacatalog.CreateArtifactRequest{Artifact: getTestArtifact()}
@@ -338,10 +338,10 @@ func TestCreateArtifact(t *testing.T) {
dcRepo := newMockDataCatalogRepo()
mockDatasetModel := models.Dataset{
DatasetKey: models.DatasetKey{
- Project: expectedDataset.Id.Project,
- Domain: expectedDataset.Id.Domain,
- Name: expectedDataset.Id.Name,
- Version: expectedDataset.Id.Version,
+ Project: expectedDataset.GetId().GetProject(),
+ Domain: expectedDataset.GetId().GetDomain(),
+ Name: expectedDataset.GetId().GetName(),
+ Version: expectedDataset.GetId().GetVersion(),
},
}
dcRepo.MockDatasetRepo.On("Get", mock.Anything, mock.Anything).Return(mockDatasetModel, nil)
@@ -392,21 +392,21 @@ func TestGetArtifact(t *testing.T) {
t.Run("Get by Id", func(t *testing.T) {
dcRepo.MockArtifactRepo.On("Get", mock.Anything,
mock.MatchedBy(func(artifactKey models.ArtifactKey) bool {
- return artifactKey.ArtifactID == expectedArtifact.Id &&
- artifactKey.DatasetProject == expectedArtifact.Dataset.Project &&
- artifactKey.DatasetDomain == expectedArtifact.Dataset.Domain &&
- artifactKey.DatasetVersion == expectedArtifact.Dataset.Version &&
- artifactKey.DatasetName == expectedArtifact.Dataset.Name
+ return artifactKey.ArtifactID == expectedArtifact.GetId() &&
+ artifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() &&
+ artifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() &&
+ artifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() &&
+ artifactKey.DatasetName == expectedArtifact.GetDataset().GetName()
})).Return(mockArtifactModel, nil)
artifactManager := NewArtifactManager(dcRepo, datastore, testStoragePrefix, mockScope.NewTestScope())
artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{
- Dataset: getTestDataset().Id,
- QueryHandle: &datacatalog.GetArtifactRequest_ArtifactId{ArtifactId: expectedArtifact.Id},
+ Dataset: getTestDataset().GetId(),
+ QueryHandle: &datacatalog.GetArtifactRequest_ArtifactId{ArtifactId: expectedArtifact.GetId()},
})
assert.NoError(t, err)
- assert.True(t, proto.Equal(expectedArtifact, artifactResponse.Artifact))
+ assert.True(t, proto.Equal(expectedArtifact, artifactResponse.GetArtifact()))
})
t.Run("Get by Artifact Tag", func(t *testing.T) {
@@ -434,16 +434,16 @@ func TestGetArtifact(t *testing.T) {
artifactManager := NewArtifactManager(dcRepo, datastore, testStoragePrefix, mockScope.NewTestScope())
artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{
- Dataset: getTestDataset().Id,
+ Dataset: getTestDataset().GetId(),
QueryHandle: &datacatalog.GetArtifactRequest_TagName{TagName: expectedTag.TagName},
})
assert.NoError(t, err)
- assert.True(t, proto.Equal(expectedArtifact, artifactResponse.Artifact))
+ assert.True(t, proto.Equal(expectedArtifact, artifactResponse.GetArtifact()))
})
t.Run("Get missing input", func(t *testing.T) {
artifactManager := NewArtifactManager(dcRepo, datastore, testStoragePrefix, mockScope.NewTestScope())
- artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{Dataset: getTestDataset().Id})
+ artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{Dataset: getTestDataset().GetId()})
assert.Error(t, err)
assert.Nil(t, artifactResponse)
responseCode := status.Code(err)
@@ -454,7 +454,7 @@ func TestGetArtifact(t *testing.T) {
dcRepo.MockTagRepo.On("Get", mock.Anything, mock.Anything).Return(
models.Tag{}, errors.NewDataCatalogError(codes.NotFound, "tag with artifact does not exist"))
artifactManager := NewArtifactManager(dcRepo, datastore, testStoragePrefix, mockScope.NewTestScope())
- artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{Dataset: getTestDataset().Id, QueryHandle: &datacatalog.GetArtifactRequest_TagName{TagName: "test"}})
+ artifactResponse, err := artifactManager.GetArtifact(ctx, &datacatalog.GetArtifactRequest{Dataset: getTestDataset().GetId(), QueryHandle: &datacatalog.GetArtifactRequest_TagName{TagName: "test"}})
assert.Error(t, err)
assert.Nil(t, artifactResponse)
responseCode := status.Code(err)
@@ -473,11 +473,11 @@ func TestListArtifact(t *testing.T) {
expectedDataset := getTestDataset()
mockDatasetModel := models.Dataset{
DatasetKey: models.DatasetKey{
- Project: expectedDataset.Id.Project,
- Domain: expectedDataset.Id.Domain,
- Name: expectedDataset.Id.Name,
- Version: expectedDataset.Id.Version,
- UUID: expectedDataset.Id.UUID,
+ Project: expectedDataset.GetId().GetProject(),
+ Domain: expectedDataset.GetId().GetDomain(),
+ Name: expectedDataset.GetId().GetName(),
+ Version: expectedDataset.GetId().GetVersion(),
+ UUID: expectedDataset.GetId().GetUUID(),
},
}
@@ -500,7 +500,7 @@ func TestListArtifact(t *testing.T) {
},
}
- artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: getTestDataset().Id, Filter: filter})
+ artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: getTestDataset().GetId(), Filter: filter})
assert.Error(t, err)
assert.Nil(t, artifactResponse)
responseCode := status.Code(err)
@@ -543,10 +543,10 @@ func TestListArtifact(t *testing.T) {
dcRepo.MockDatasetRepo.On("Get", mock.Anything,
mock.MatchedBy(func(dataset models.DatasetKey) bool {
- return dataset.Project == expectedDataset.Id.Project &&
- dataset.Domain == expectedDataset.Id.Domain &&
- dataset.Name == expectedDataset.Id.Name &&
- dataset.Version == expectedDataset.Id.Version
+ return dataset.Project == expectedDataset.GetId().GetProject() &&
+ dataset.Domain == expectedDataset.GetId().GetDomain() &&
+ dataset.Name == expectedDataset.GetId().GetName() &&
+ dataset.Version == expectedDataset.GetId().GetVersion()
})).Return(mockDatasetModel, nil)
mockArtifacts := []models.Artifact{
@@ -556,10 +556,10 @@ func TestListArtifact(t *testing.T) {
dcRepo.MockArtifactRepo.On("List", mock.Anything,
mock.MatchedBy(func(dataset models.DatasetKey) bool {
- return dataset.Project == expectedDataset.Id.Project &&
- dataset.Domain == expectedDataset.Id.Domain &&
- dataset.Name == expectedDataset.Id.Name &&
- dataset.Version == expectedDataset.Id.Version
+ return dataset.Project == expectedDataset.GetId().GetProject() &&
+ dataset.Domain == expectedDataset.GetId().GetDomain() &&
+ dataset.Name == expectedDataset.GetId().GetName() &&
+ dataset.Version == expectedDataset.GetId().GetVersion()
}),
mock.MatchedBy(func(listInput models.ListModelsInput) bool {
return len(listInput.ModelFilters) == 3 &&
@@ -573,7 +573,7 @@ func TestListArtifact(t *testing.T) {
listInput.Offset == 0
})).Return(mockArtifacts, nil)
- artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: expectedDataset.Id, Filter: filter})
+ artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: expectedDataset.GetId(), Filter: filter})
assert.NoError(t, err)
assert.NotEmpty(t, artifactResponse)
})
@@ -584,10 +584,10 @@ func TestListArtifact(t *testing.T) {
dcRepo.MockDatasetRepo.On("Get", mock.Anything,
mock.MatchedBy(func(dataset models.DatasetKey) bool {
- return dataset.Project == expectedDataset.Id.Project &&
- dataset.Domain == expectedDataset.Id.Domain &&
- dataset.Name == expectedDataset.Id.Name &&
- dataset.Version == expectedDataset.Id.Version
+ return dataset.Project == expectedDataset.GetId().GetProject() &&
+ dataset.Domain == expectedDataset.GetId().GetDomain() &&
+ dataset.Name == expectedDataset.GetId().GetName() &&
+ dataset.Version == expectedDataset.GetId().GetVersion()
})).Return(mockDatasetModel, nil)
mockArtifacts := []models.Artifact{
@@ -596,16 +596,16 @@ func TestListArtifact(t *testing.T) {
}
dcRepo.MockArtifactRepo.On("List", mock.Anything,
mock.MatchedBy(func(dataset models.DatasetKey) bool {
- return dataset.Project == expectedDataset.Id.Project &&
- dataset.Domain == expectedDataset.Id.Domain &&
- dataset.Name == expectedDataset.Id.Name &&
- dataset.Version == expectedDataset.Id.Version
+ return dataset.Project == expectedDataset.GetId().GetProject() &&
+ dataset.Domain == expectedDataset.GetId().GetDomain() &&
+ dataset.Name == expectedDataset.GetId().GetName() &&
+ dataset.Version == expectedDataset.GetId().GetVersion()
}),
mock.MatchedBy(func(listInput models.ListModelsInput) bool {
return len(listInput.ModelFilters) == 0
})).Return(mockArtifacts, nil)
- artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: expectedDataset.Id, Filter: filter})
+ artifactResponse, err := artifactManager.ListArtifacts(ctx, &datacatalog.ListArtifactsRequest{Dataset: expectedDataset.GetId(), Filter: filter})
assert.NoError(t, err)
assert.NotEmpty(t, artifactResponse)
})
@@ -634,11 +634,11 @@ func TestUpdateArtifact(t *testing.T) {
dcRepo.MockArtifactRepo.On("Get",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(artifactKey models.ArtifactKey) bool {
- return artifactKey.ArtifactID == expectedArtifact.Id &&
- artifactKey.DatasetProject == expectedArtifact.Dataset.Project &&
- artifactKey.DatasetDomain == expectedArtifact.Dataset.Domain &&
- artifactKey.DatasetName == expectedArtifact.Dataset.Name &&
- artifactKey.DatasetVersion == expectedArtifact.Dataset.Version
+ return artifactKey.ArtifactID == expectedArtifact.GetId() &&
+ artifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() &&
+ artifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() &&
+ artifactKey.DatasetName == expectedArtifact.GetDataset().GetName() &&
+ artifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion()
})).Return(mockArtifactModel, nil)
metaData := &datacatalog.Metadata{
@@ -650,18 +650,18 @@ func TestUpdateArtifact(t *testing.T) {
dcRepo.MockArtifactRepo.On("Update",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(artifact models.Artifact) bool {
- return artifact.ArtifactID == expectedArtifact.Id &&
- artifact.ArtifactKey.DatasetProject == expectedArtifact.Dataset.Project &&
- artifact.ArtifactKey.DatasetDomain == expectedArtifact.Dataset.Domain &&
- artifact.ArtifactKey.DatasetName == expectedArtifact.Dataset.Name &&
- artifact.ArtifactKey.DatasetVersion == expectedArtifact.Dataset.Version &&
+ return artifact.ArtifactID == expectedArtifact.GetId() &&
+ artifact.ArtifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() &&
+ artifact.ArtifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() &&
+ artifact.ArtifactKey.DatasetName == expectedArtifact.GetDataset().GetName() &&
+ artifact.ArtifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() &&
reflect.DeepEqual(artifact.SerializedMetadata, serializedMetadata)
})).Return(nil)
request := &datacatalog.UpdateArtifactRequest{
- Dataset: expectedDataset.Id,
+ Dataset: expectedDataset.GetId(),
QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{
- ArtifactId: expectedArtifact.Id,
+ ArtifactId: expectedArtifact.GetId(),
},
Data: []*datacatalog.ArtifactData{
{
@@ -682,7 +682,7 @@ func TestUpdateArtifact(t *testing.T) {
artifactResponse, err := artifactManager.UpdateArtifact(ctx, request)
assert.NoError(t, err)
assert.NotNil(t, artifactResponse)
- assert.Equal(t, expectedArtifact.Id, artifactResponse.GetArtifactId())
+ assert.Equal(t, expectedArtifact.GetId(), artifactResponse.GetArtifactId())
dcRepo.MockArtifactRepo.AssertExpectations(t)
// check that the datastore has the updated artifactData available
@@ -724,11 +724,11 @@ func TestUpdateArtifact(t *testing.T) {
dcRepo.MockArtifactRepo.On("Update",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(artifact models.Artifact) bool {
- return artifact.ArtifactID == expectedArtifact.Id &&
- artifact.ArtifactKey.DatasetProject == expectedArtifact.Dataset.Project &&
- artifact.ArtifactKey.DatasetDomain == expectedArtifact.Dataset.Domain &&
- artifact.ArtifactKey.DatasetName == expectedArtifact.Dataset.Name &&
- artifact.ArtifactKey.DatasetVersion == expectedArtifact.Dataset.Version &&
+ return artifact.ArtifactID == expectedArtifact.GetId() &&
+ artifact.ArtifactKey.DatasetProject == expectedArtifact.GetDataset().GetProject() &&
+ artifact.ArtifactKey.DatasetDomain == expectedArtifact.GetDataset().GetDomain() &&
+ artifact.ArtifactKey.DatasetName == expectedArtifact.GetDataset().GetName() &&
+ artifact.ArtifactKey.DatasetVersion == expectedArtifact.GetDataset().GetVersion() &&
reflect.DeepEqual(artifact.SerializedMetadata, serializedMetadata)
})).Return(nil)
@@ -753,7 +753,7 @@ func TestUpdateArtifact(t *testing.T) {
}, nil)
request := &datacatalog.UpdateArtifactRequest{
- Dataset: expectedDataset.Id,
+ Dataset: expectedDataset.GetId(),
QueryHandle: &datacatalog.UpdateArtifactRequest_TagName{
TagName: expectedTag.TagName,
},
@@ -776,7 +776,7 @@ func TestUpdateArtifact(t *testing.T) {
artifactResponse, err := artifactManager.UpdateArtifact(ctx, request)
assert.NoError(t, err)
assert.NotNil(t, artifactResponse)
- assert.Equal(t, expectedArtifact.Id, artifactResponse.GetArtifactId())
+ assert.Equal(t, expectedArtifact.GetId(), artifactResponse.GetArtifactId())
dcRepo.MockArtifactRepo.AssertExpectations(t)
// check that the datastore has the updated artifactData available
@@ -809,14 +809,14 @@ func TestUpdateArtifact(t *testing.T) {
dcRepo := newMockDataCatalogRepo()
dcRepo.MockArtifactRepo.On("Get", mock.Anything, mock.Anything).Return(models.Artifact{}, repoErrors.GetMissingEntityError("Artifact", &datacatalog.Artifact{
- Dataset: expectedDataset.Id,
- Id: expectedArtifact.Id,
+ Dataset: expectedDataset.GetId(),
+ Id: expectedArtifact.GetId(),
}))
request := &datacatalog.UpdateArtifactRequest{
- Dataset: expectedDataset.Id,
+ Dataset: expectedDataset.GetId(),
QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{
- ArtifactId: expectedArtifact.Id,
+ ArtifactId: expectedArtifact.GetId(),
},
Data: []*datacatalog.ArtifactData{
{
@@ -844,7 +844,7 @@ func TestUpdateArtifact(t *testing.T) {
dcRepo := newMockDataCatalogRepo()
request := &datacatalog.UpdateArtifactRequest{
- Dataset: expectedDataset.Id,
+ Dataset: expectedDataset.GetId(),
QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{},
Data: []*datacatalog.ArtifactData{
{
@@ -872,7 +872,7 @@ func TestUpdateArtifact(t *testing.T) {
dcRepo := newMockDataCatalogRepo()
request := &datacatalog.UpdateArtifactRequest{
- Dataset: expectedDataset.Id,
+ Dataset: expectedDataset.GetId(),
QueryHandle: &datacatalog.UpdateArtifactRequest_TagName{},
Data: []*datacatalog.ArtifactData{
{
@@ -900,9 +900,9 @@ func TestUpdateArtifact(t *testing.T) {
dcRepo := newMockDataCatalogRepo()
request := &datacatalog.UpdateArtifactRequest{
- Dataset: expectedDataset.Id,
+ Dataset: expectedDataset.GetId(),
QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{
- ArtifactId: expectedArtifact.Id,
+ ArtifactId: expectedArtifact.GetId(),
},
Data: nil,
}
@@ -921,9 +921,9 @@ func TestUpdateArtifact(t *testing.T) {
dcRepo := newMockDataCatalogRepo()
request := &datacatalog.UpdateArtifactRequest{
- Dataset: expectedDataset.Id,
+ Dataset: expectedDataset.GetId(),
QueryHandle: &datacatalog.UpdateArtifactRequest_ArtifactId{
- ArtifactId: expectedArtifact.Id,
+ ArtifactId: expectedArtifact.GetId(),
},
Data: []*datacatalog.ArtifactData{},
}
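
In the test hunks above, assertions now read messages through response getters (for example artifactResponse.GetArtifact()) before handing them to proto.Equal. proto.Equal compares messages field by field, so an independently constructed expected message still compares equal to the one returned by the manager; a tiny illustration with a well-known wrapper type rather than the datacatalog messages:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hello")
	b := wrapperspb.String("hello")

	// proto.Equal compares messages field by field, so two independently built
	// messages with the same contents compare equal...
	fmt.Println(proto.Equal(a, b)) // true

	// ...while plain pointer comparison does not.
	fmt.Println(a == b) // false
}
```
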
diff --git a/datacatalog/pkg/manager/impl/dataset_manager.go b/datacatalog/pkg/manager/impl/dataset_manager.go
index 0db84d6360..8caca3f3a3 100644
--- a/datacatalog/pkg/manager/impl/dataset_manager.go
+++ b/datacatalog/pkg/manager/impl/dataset_manager.go
@@ -44,12 +44,12 @@ type datasetManager struct {
func (dm *datasetManager) validateCreateRequest(request *datacatalog.CreateDatasetRequest) error {
errorSet := make([]error, 0)
- err := validators.ValidateDatasetID(request.Dataset.Id)
+ err := validators.ValidateDatasetID(request.GetDataset().GetId())
if err != nil {
errorSet = append(errorSet, err)
}
- err = validators.ValidateUniquePartitionKeys(request.Dataset.PartitionKeys)
+ err = validators.ValidateUniquePartitionKeys(request.GetDataset().GetPartitionKeys())
if err != nil {
errorSet = append(errorSet, err)
}
@@ -71,7 +71,7 @@ func (dm *datasetManager) CreateDataset(ctx context.Context, request *datacatalo
return nil, err
}
- datasetModel, err := transformers.CreateDatasetModel(request.Dataset)
+ datasetModel, err := transformers.CreateDatasetModel(request.GetDataset())
if err != nil {
logger.Errorf(ctx, "Unable to transform create dataset request %+v err: %v", request, err)
dm.systemMetrics.transformerErrorCounter.Inc(ctx)
@@ -81,7 +81,7 @@ func (dm *datasetManager) CreateDataset(ctx context.Context, request *datacatalo
err = dm.repo.DatasetRepo().Create(ctx, *datasetModel)
if err != nil {
if errors.IsAlreadyExistsError(err) {
- logger.Warnf(ctx, "Dataset already exists key: %+v, err %v", request.Dataset, err)
+ logger.Warnf(ctx, "Dataset already exists key: %+v, err %v", request.GetDataset(), err)
dm.systemMetrics.alreadyExistsCounter.Inc(ctx)
} else {
logger.Errorf(ctx, "Failed to create dataset model: %+v err: %v", datasetModel, err)
@@ -90,7 +90,7 @@ func (dm *datasetManager) CreateDataset(ctx context.Context, request *datacatalo
return nil, err
}
- logger.Debugf(ctx, "Successfully created dataset %+v", request.Dataset)
+ logger.Debugf(ctx, "Successfully created dataset %+v", request.GetDataset())
dm.systemMetrics.createSuccessCounter.Inc(ctx)
return &datacatalog.CreateDatasetResponse{}, nil
}
@@ -100,14 +100,14 @@ func (dm *datasetManager) GetDataset(ctx context.Context, request *datacatalog.G
timer := dm.systemMetrics.getResponseTime.Start(ctx)
defer timer.Stop()
- err := validators.ValidateDatasetID(request.Dataset)
+ err := validators.ValidateDatasetID(request.GetDataset())
if err != nil {
logger.Warnf(ctx, "Invalid get dataset request %+v err: %v", request, err)
dm.systemMetrics.validationErrorCounter.Inc(ctx)
return nil, err
}
- datasetKey := transformers.FromDatasetID(request.Dataset)
+ datasetKey := transformers.FromDatasetID(request.GetDataset())
datasetModel, err := dm.repo.DatasetRepo().Get(ctx, datasetKey)
if err != nil {
@@ -150,7 +150,7 @@ func (dm *datasetManager) ListDatasets(ctx context.Context, request *datacatalog
return nil, err
}
- err = transformers.ApplyPagination(request.Pagination, &listInput)
+ err = transformers.ApplyPagination(request.GetPagination(), &listInput)
if err != nil {
logger.Warningf(ctx, "Invalid pagination options in list datasets request %v, err: %v", request, err)
dm.systemMetrics.validationErrorCounter.Inc(ctx)
@@ -171,7 +171,7 @@ func (dm *datasetManager) ListDatasets(ctx context.Context, request *datacatalog
for idx, datasetModel := range datasetModels {
dataset, err := transformers.FromDatasetModel(datasetModel)
if err != nil {
- logger.Errorf(ctx, "Unable to transform Dataset %+v err: %v", dataset.Id, err)
+ logger.Errorf(ctx, "Unable to transform Dataset %+v err: %v", dataset.GetId(), err)
transformerErrs = append(transformerErrs, err)
}
diff --git a/datacatalog/pkg/manager/impl/dataset_manager_test.go b/datacatalog/pkg/manager/impl/dataset_manager_test.go
index 2ebd107304..9d668fdef1 100644
--- a/datacatalog/pkg/manager/impl/dataset_manager_test.go
+++ b/datacatalog/pkg/manager/impl/dataset_manager_test.go
@@ -58,13 +58,13 @@ func TestCreateDataset(t *testing.T) {
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(dataset models.Dataset) bool {
- return dataset.Name == expectedDataset.Id.Name &&
- dataset.Project == expectedDataset.Id.Project &&
- dataset.Domain == expectedDataset.Id.Domain &&
- dataset.Version == expectedDataset.Id.Version &&
- len(dataset.PartitionKeys) == len(expectedDataset.PartitionKeys) &&
- dataset.PartitionKeys[0].Name == expectedDataset.PartitionKeys[0] &&
- dataset.PartitionKeys[1].Name == expectedDataset.PartitionKeys[1]
+ return dataset.Name == expectedDataset.GetId().GetName() &&
+ dataset.Project == expectedDataset.GetId().GetProject() &&
+ dataset.Domain == expectedDataset.GetId().GetDomain() &&
+ dataset.Version == expectedDataset.GetId().GetVersion() &&
+ len(dataset.PartitionKeys) == len(expectedDataset.GetPartitionKeys()) &&
+ dataset.PartitionKeys[0].Name == expectedDataset.GetPartitionKeys()[0] &&
+ dataset.PartitionKeys[1].Name == expectedDataset.GetPartitionKeys()[1]
})).Return(nil)
request := &datacatalog.CreateDatasetRequest{Dataset: expectedDataset}
datasetResponse, err := datasetManager.CreateDataset(context.Background(), request)
@@ -79,10 +79,10 @@ func TestCreateDataset(t *testing.T) {
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(dataset models.Dataset) bool {
- return dataset.Name == expectedDataset.Id.Name &&
- dataset.Project == expectedDataset.Id.Project &&
- dataset.Domain == expectedDataset.Id.Domain &&
- dataset.Version == expectedDataset.Id.Version &&
+ return dataset.Name == expectedDataset.GetId().GetName() &&
+ dataset.Project == expectedDataset.GetId().GetProject() &&
+ dataset.Domain == expectedDataset.GetId().GetDomain() &&
+ dataset.Version == expectedDataset.GetId().GetVersion() &&
len(dataset.PartitionKeys) == 0
})).Return(nil)
@@ -132,7 +132,7 @@ func TestCreateDataset(t *testing.T) {
t.Run("DuplicatePartition", func(t *testing.T) {
dcRepo := getDataCatalogRepo()
badDataset := getTestDataset()
- badDataset.PartitionKeys = append(badDataset.PartitionKeys, badDataset.PartitionKeys[0])
+ badDataset.PartitionKeys = append(badDataset.PartitionKeys, badDataset.GetPartitionKeys()[0])
datasetManager := NewDatasetManager(dcRepo, nil, mockScope.NewTestScope())
dcRepo.MockDatasetRepo.On("Create",
@@ -162,17 +162,17 @@ func TestGetDataset(t *testing.T) {
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(datasetKey models.DatasetKey) bool {
- return datasetKey.Name == expectedDataset.Id.Name &&
- datasetKey.Project == expectedDataset.Id.Project &&
- datasetKey.Domain == expectedDataset.Id.Domain &&
- datasetKey.Version == expectedDataset.Id.Version
+ return datasetKey.Name == expectedDataset.GetId().GetName() &&
+ datasetKey.Project == expectedDataset.GetId().GetProject() &&
+ datasetKey.Domain == expectedDataset.GetId().GetDomain() &&
+ datasetKey.Version == expectedDataset.GetId().GetVersion()
})).Return(*datasetModelResponse, nil)
- request := &datacatalog.GetDatasetRequest{Dataset: getTestDataset().Id}
+ request := &datacatalog.GetDatasetRequest{Dataset: getTestDataset().GetId()}
datasetResponse, err := datasetManager.GetDataset(context.Background(), request)
assert.NoError(t, err)
assert.NotNil(t, datasetResponse)
- assert.True(t, proto.Equal(datasetResponse.Dataset, expectedDataset))
- assert.EqualValues(t, datasetResponse.Dataset.Metadata.KeyMap, expectedDataset.Metadata.KeyMap)
+ assert.True(t, proto.Equal(datasetResponse.GetDataset(), expectedDataset))
+ assert.EqualValues(t, datasetResponse.GetDataset().GetMetadata().GetKeyMap(), expectedDataset.GetMetadata().GetKeyMap())
})
t.Run("Does not exist", func(t *testing.T) {
@@ -183,12 +183,12 @@ func TestGetDataset(t *testing.T) {
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(datasetKey models.DatasetKey) bool {
- return datasetKey.Name == expectedDataset.Id.Name &&
- datasetKey.Project == expectedDataset.Id.Project &&
- datasetKey.Domain == expectedDataset.Id.Domain &&
- datasetKey.Version == expectedDataset.Id.Version
+ return datasetKey.Name == expectedDataset.GetId().GetName() &&
+ datasetKey.Project == expectedDataset.GetId().GetProject() &&
+ datasetKey.Domain == expectedDataset.GetId().GetDomain() &&
+ datasetKey.Version == expectedDataset.GetId().GetVersion()
})).Return(models.Dataset{}, errors.NewDataCatalogError(codes.NotFound, "dataset does not exist"))
- request := &datacatalog.GetDatasetRequest{Dataset: getTestDataset().Id}
+ request := &datacatalog.GetDatasetRequest{Dataset: getTestDataset().GetId()}
_, err := datasetManager.GetDataset(context.Background(), request)
assert.Error(t, err)
responseCode := status.Code(err)
@@ -267,7 +267,7 @@ func TestListDatasets(t *testing.T) {
datasetResponse, err := datasetManager.ListDatasets(ctx, &datacatalog.ListDatasetsRequest{Filter: filter})
assert.NoError(t, err)
assert.NotEmpty(t, datasetResponse)
- assert.Len(t, datasetResponse.Datasets, 1)
+ assert.Len(t, datasetResponse.GetDatasets(), 1)
})
t.Run("List Datasets with no filtering", func(t *testing.T) {
@@ -286,6 +286,6 @@ func TestListDatasets(t *testing.T) {
datasetResponse, err := datasetManager.ListDatasets(ctx, &datacatalog.ListDatasetsRequest{})
assert.NoError(t, err)
assert.NotEmpty(t, datasetResponse)
- assert.Len(t, datasetResponse.Datasets, 1)
+ assert.Len(t, datasetResponse.GetDatasets(), 1)
})
}
diff --git a/datacatalog/pkg/manager/impl/reservation_manager.go b/datacatalog/pkg/manager/impl/reservation_manager.go
index 394ad5a55d..62dbb25668 100644
--- a/datacatalog/pkg/manager/impl/reservation_manager.go
+++ b/datacatalog/pkg/manager/impl/reservation_manager.go
@@ -88,7 +88,7 @@ func NewReservationManager(
// Attempt to acquire a reservation for the specified artifact. If there is no active reservation, successfully
// acquire it. If you are the owner of the active reservation, extend it. If another owner, return the existing reservation.
func (r *reservationManager) GetOrExtendReservation(ctx context.Context, request *datacatalog.GetOrExtendReservationRequest) (*datacatalog.GetOrExtendReservationResponse, error) {
- reservationID := request.ReservationId
+ reservationID := request.GetReservationId()
// Use minimum of maxHeartbeatInterval and requested heartbeat interval
heartbeatInterval := r.maxHeartbeatInterval
@@ -97,7 +97,7 @@ func (r *reservationManager) GetOrExtendReservation(ctx context.Context, request
heartbeatInterval = requestHeartbeatInterval.AsDuration()
}
- reservation, err := r.tryAcquireReservation(ctx, reservationID, request.OwnerId, heartbeatInterval)
+ reservation, err := r.tryAcquireReservation(ctx, reservationID, request.GetOwnerId(), heartbeatInterval)
if err != nil {
r.systemMetrics.acquireReservationFailure.Inc(ctx)
return nil, err
@@ -189,12 +189,12 @@ func (r *reservationManager) tryAcquireReservation(ctx context.Context, reservat
// Release an active reservation with the specified owner. If one does not exist, gracefully return.
func (r *reservationManager) ReleaseReservation(ctx context.Context, request *datacatalog.ReleaseReservationRequest) (*datacatalog.ReleaseReservationResponse, error) {
repo := r.repo.ReservationRepo()
- reservationKey := transformers.FromReservationID(request.ReservationId)
+ reservationKey := transformers.FromReservationID(request.GetReservationId())
- err := repo.Delete(ctx, reservationKey, request.OwnerId)
+ err := repo.Delete(ctx, reservationKey, request.GetOwnerId())
if err != nil {
if errors.IsDoesNotExistError(err) {
- logger.Warnf(ctx, "Reservation does not exist id: %+v, err %v", request.ReservationId, err)
+ logger.Warnf(ctx, "Reservation does not exist id: %+v, err %v", request.GetReservationId(), err)
r.systemMetrics.reservationDoesNotExist.Inc(ctx)
return &datacatalog.ReleaseReservationResponse{}, nil
}
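
The GetOrExtendReservation hunk above touches the heartbeat-interval handling: the requested interval arrives as a protobuf Duration and, per the comment in the source, is only honoured when it is shorter than the configured maximum. A minimal sketch of that clamping under assumed names (persistence and ownership checks are omitted):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

// clampHeartbeat mirrors, under assumed names, the clamping in
// GetOrExtendReservation: start from the configured maximum and only shrink it
// when the caller asks for a shorter interval.
func clampHeartbeat(maxInterval time.Duration, requested *durationpb.Duration) time.Duration {
	interval := maxInterval
	if requested != nil && requested.AsDuration() < interval {
		interval = requested.AsDuration()
	}
	return interval
}

func main() {
	limit := 10 * time.Second
	fmt.Println(clampHeartbeat(limit, durationpb.New(3*time.Second)))  // 3s
	fmt.Println(clampHeartbeat(limit, durationpb.New(30*time.Second))) // 10s, clamped to the maximum
	fmt.Println(clampHeartbeat(limit, nil))                            // 10s, default when unset
}
```
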
diff --git a/datacatalog/pkg/manager/impl/reservation_manager_test.go b/datacatalog/pkg/manager/impl/reservation_manager_test.go
index 0dd7408792..1281b7df89 100644
--- a/datacatalog/pkg/manager/impl/reservation_manager_test.go
+++ b/datacatalog/pkg/manager/impl/reservation_manager_test.go
@@ -50,10 +50,10 @@ func TestGetOrExtendReservation_CreateReservation(t *testing.T) {
dcRepo.MockReservationRepo.On("Get",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(key models.ReservationKey) bool {
- return key.DatasetProject == datasetID.Project &&
- key.DatasetDomain == datasetID.Domain &&
- key.DatasetVersion == datasetID.Version &&
- key.DatasetName == datasetID.Name &&
+ return key.DatasetProject == datasetID.GetProject() &&
+ key.DatasetDomain == datasetID.GetDomain() &&
+ key.DatasetVersion == datasetID.GetVersion() &&
+ key.DatasetName == datasetID.GetName() &&
key.TagName == tagName
})).Return(models.Reservation{}, errors2.NewDataCatalogErrorf(codes.NotFound, "entry not found"))
@@ -62,10 +62,10 @@ func TestGetOrExtendReservation_CreateReservation(t *testing.T) {
dcRepo.MockReservationRepo.On("Create",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(reservation models.Reservation) bool {
- return reservation.DatasetProject == datasetID.Project &&
- reservation.DatasetDomain == datasetID.Domain &&
- reservation.DatasetName == datasetID.Name &&
- reservation.DatasetVersion == datasetID.Version &&
+ return reservation.DatasetProject == datasetID.GetProject() &&
+ reservation.DatasetDomain == datasetID.GetDomain() &&
+ reservation.DatasetName == datasetID.GetName() &&
+ reservation.DatasetVersion == datasetID.GetVersion() &&
reservation.TagName == tagName &&
reservation.OwnerID == currentOwner &&
reservation.ExpiresAt == now.Add(heartbeatInterval*heartbeatGracePeriodMultiplier)
@@ -86,8 +86,8 @@ func TestGetOrExtendReservation_CreateReservation(t *testing.T) {
resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req)
assert.Nil(t, err)
- assert.Equal(t, currentOwner, resp.GetReservation().OwnerId)
- assert.Equal(t, heartbeatIntervalPb, resp.GetReservation().HeartbeatInterval)
+ assert.Equal(t, currentOwner, resp.GetReservation().GetOwnerId())
+ assert.Equal(t, heartbeatIntervalPb, resp.GetReservation().GetHeartbeatInterval())
}
func TestGetOrExtendReservation_MaxHeartbeatInterval(t *testing.T) {
@@ -98,10 +98,10 @@ func TestGetOrExtendReservation_MaxHeartbeatInterval(t *testing.T) {
dcRepo.MockReservationRepo.On("Get",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(key models.ReservationKey) bool {
- return key.DatasetProject == datasetID.Project &&
- key.DatasetDomain == datasetID.Domain &&
- key.DatasetVersion == datasetID.Version &&
- key.DatasetName == datasetID.Name &&
+ return key.DatasetProject == datasetID.GetProject() &&
+ key.DatasetDomain == datasetID.GetDomain() &&
+ key.DatasetVersion == datasetID.GetVersion() &&
+ key.DatasetName == datasetID.GetName() &&
key.TagName == tagName
})).Return(models.Reservation{}, errors2.NewDataCatalogErrorf(codes.NotFound, "entry not found"))
@@ -110,10 +110,10 @@ func TestGetOrExtendReservation_MaxHeartbeatInterval(t *testing.T) {
dcRepo.MockReservationRepo.On("Create",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(reservation models.Reservation) bool {
- return reservation.DatasetProject == datasetID.Project &&
- reservation.DatasetDomain == datasetID.Domain &&
- reservation.DatasetName == datasetID.Name &&
- reservation.DatasetVersion == datasetID.Version &&
+ return reservation.DatasetProject == datasetID.GetProject() &&
+ reservation.DatasetDomain == datasetID.GetDomain() &&
+ reservation.DatasetName == datasetID.GetName() &&
+ reservation.DatasetVersion == datasetID.GetVersion() &&
reservation.TagName == tagName &&
reservation.OwnerID == currentOwner &&
reservation.ExpiresAt == now.Add(heartbeatInterval*heartbeatGracePeriodMultiplier)
@@ -134,8 +134,8 @@ func TestGetOrExtendReservation_MaxHeartbeatInterval(t *testing.T) {
resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req)
assert.Nil(t, err)
- assert.Equal(t, currentOwner, resp.GetReservation().OwnerId)
- assert.Equal(t, heartbeatIntervalPb, resp.GetReservation().HeartbeatInterval)
+ assert.Equal(t, currentOwner, resp.GetReservation().GetOwnerId())
+ assert.Equal(t, heartbeatIntervalPb, resp.GetReservation().GetHeartbeatInterval())
}
func TestGetOrExtendReservation_ExtendReservation(t *testing.T) {
@@ -151,10 +151,10 @@ func TestGetOrExtendReservation_ExtendReservation(t *testing.T) {
dcRepo.MockReservationRepo.On("Update",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(reservation models.Reservation) bool {
- return reservation.DatasetProject == datasetID.Project &&
- reservation.DatasetDomain == datasetID.Domain &&
- reservation.DatasetName == datasetID.Name &&
- reservation.DatasetVersion == datasetID.Version &&
+ return reservation.DatasetProject == datasetID.GetProject() &&
+ reservation.DatasetDomain == datasetID.GetDomain() &&
+ reservation.DatasetName == datasetID.GetName() &&
+ reservation.DatasetVersion == datasetID.GetVersion() &&
reservation.TagName == tagName &&
reservation.OwnerID == prevOwner &&
reservation.ExpiresAt == now.Add(heartbeatInterval*heartbeatGracePeriodMultiplier)
@@ -175,7 +175,7 @@ func TestGetOrExtendReservation_ExtendReservation(t *testing.T) {
resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req)
assert.Nil(t, err)
- assert.Equal(t, prevOwner, resp.GetReservation().OwnerId)
+ assert.Equal(t, prevOwner, resp.GetReservation().GetOwnerId())
}
func TestGetOrExtendReservation_TakeOverReservation(t *testing.T) {
@@ -191,10 +191,10 @@ func TestGetOrExtendReservation_TakeOverReservation(t *testing.T) {
dcRepo.MockReservationRepo.On("Update",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(reservation models.Reservation) bool {
- return reservation.DatasetProject == datasetID.Project &&
- reservation.DatasetDomain == datasetID.Domain &&
- reservation.DatasetName == datasetID.Name &&
- reservation.DatasetVersion == datasetID.Version &&
+ return reservation.DatasetProject == datasetID.GetProject() &&
+ reservation.DatasetDomain == datasetID.GetDomain() &&
+ reservation.DatasetName == datasetID.GetName() &&
+ reservation.DatasetVersion == datasetID.GetVersion() &&
reservation.TagName == tagName &&
reservation.OwnerID == currentOwner &&
reservation.ExpiresAt == now.Add(heartbeatInterval*heartbeatGracePeriodMultiplier)
@@ -215,7 +215,7 @@ func TestGetOrExtendReservation_TakeOverReservation(t *testing.T) {
resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req)
assert.Nil(t, err)
- assert.Equal(t, currentOwner, resp.GetReservation().OwnerId)
+ assert.Equal(t, currentOwner, resp.GetReservation().GetOwnerId())
}
func TestGetOrExtendReservation_ReservationExists(t *testing.T) {
@@ -241,7 +241,7 @@ func TestGetOrExtendReservation_ReservationExists(t *testing.T) {
resp, err := reservationManager.GetOrExtendReservation(context.Background(), &req)
assert.Nil(t, err)
- assert.Equal(t, prevOwner, resp.GetReservation().OwnerId)
+ assert.Equal(t, prevOwner, resp.GetReservation().GetOwnerId())
}
func TestReleaseReservation(t *testing.T) {
@@ -252,10 +252,10 @@ func TestReleaseReservation(t *testing.T) {
dcRepo.MockReservationRepo.On("Delete",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(reservationKey models.ReservationKey) bool {
- return reservationKey.DatasetProject == datasetID.Project &&
- reservationKey.DatasetDomain == datasetID.Domain &&
- reservationKey.DatasetName == datasetID.Name &&
- reservationKey.DatasetVersion == datasetID.Version &&
+ return reservationKey.DatasetProject == datasetID.GetProject() &&
+ reservationKey.DatasetDomain == datasetID.GetDomain() &&
+ reservationKey.DatasetName == datasetID.GetName() &&
+ reservationKey.DatasetVersion == datasetID.GetVersion() &&
reservationKey.TagName == tagName
}),
mock.MatchedBy(func(ownerID string) bool {
@@ -286,10 +286,10 @@ func TestReleaseReservation_Failure(t *testing.T) {
dcRepo.MockReservationRepo.On("Delete",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(reservationKey models.ReservationKey) bool {
- return reservationKey.DatasetProject == datasetID.Project &&
- reservationKey.DatasetDomain == datasetID.Domain &&
- reservationKey.DatasetName == datasetID.Name &&
- reservationKey.DatasetVersion == datasetID.Version &&
+ return reservationKey.DatasetProject == datasetID.GetProject() &&
+ reservationKey.DatasetDomain == datasetID.GetDomain() &&
+ reservationKey.DatasetName == datasetID.GetName() &&
+ reservationKey.DatasetVersion == datasetID.GetVersion() &&
reservationKey.TagName == tagName
}),
mock.MatchedBy(func(ownerID string) bool {
@@ -324,10 +324,10 @@ func TestReleaseReservation_GracefulFailure(t *testing.T) {
dcRepo.MockReservationRepo.On("Delete",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(reservationKey models.ReservationKey) bool {
- return reservationKey.DatasetProject == datasetID.Project &&
- reservationKey.DatasetDomain == datasetID.Domain &&
- reservationKey.DatasetName == datasetID.Name &&
- reservationKey.DatasetVersion == datasetID.Version &&
+ return reservationKey.DatasetProject == datasetID.GetProject() &&
+ reservationKey.DatasetDomain == datasetID.GetDomain() &&
+ reservationKey.DatasetName == datasetID.GetName() &&
+ reservationKey.DatasetVersion == datasetID.GetVersion() &&
reservationKey.TagName == tagName
}),
mock.MatchedBy(func(ownerID string) bool {
@@ -360,10 +360,10 @@ func setUpReservationRepoGet(dcRepo *mocks.DataCatalogRepo, prevExpiresAt time.T
dcRepo.MockReservationRepo.On("Get",
mock.MatchedBy(func(ctx context.Context) bool { return true }),
mock.MatchedBy(func(key models.ReservationKey) bool {
- return key.DatasetProject == datasetID.Project &&
- key.DatasetDomain == datasetID.Domain &&
- key.DatasetVersion == datasetID.Version &&
- key.DatasetName == datasetID.Name &&
+ return key.DatasetProject == datasetID.GetProject() &&
+ key.DatasetDomain == datasetID.GetDomain() &&
+ key.DatasetVersion == datasetID.GetVersion() &&
+ key.DatasetName == datasetID.GetName() &&
key.TagName == tagName
})).Return(
models.Reservation{
diff --git a/datacatalog/pkg/manager/impl/tag_manager.go b/datacatalog/pkg/manager/impl/tag_manager.go
index 784af9164c..29280b83b4 100644
--- a/datacatalog/pkg/manager/impl/tag_manager.go
+++ b/datacatalog/pkg/manager/impl/tag_manager.go
@@ -37,15 +37,15 @@ func (m *tagManager) AddTag(ctx context.Context, request *datacatalog.AddTagRequ
timer := m.systemMetrics.createResponseTime.Start(ctx)
defer timer.Stop()
- if err := validators.ValidateTag(request.Tag); err != nil {
+ if err := validators.ValidateTag(request.GetTag()); err != nil {
logger.Warnf(ctx, "Invalid get tag request %+v err: %v", request, err)
m.systemMetrics.validationErrorCounter.Inc(ctx)
return nil, err
}
// verify the artifact and dataset exist before adding a tag to the artifact
- datasetID := request.Tag.Dataset
- ctx = contextutils.WithProjectDomain(ctx, datasetID.Project, datasetID.Domain)
+ datasetID := request.GetTag().GetDataset()
+ ctx = contextutils.WithProjectDomain(ctx, datasetID.GetProject(), datasetID.GetDomain())
datasetKey := transformers.FromDatasetID(datasetID)
dataset, err := m.repo.DatasetRepo().Get(ctx, datasetKey)
@@ -54,17 +54,17 @@ func (m *tagManager) AddTag(ctx context.Context, request *datacatalog.AddTagRequ
return nil, err
}
- artifactKey := transformers.ToArtifactKey(datasetID, request.Tag.ArtifactId)
+ artifactKey := transformers.ToArtifactKey(datasetID, request.GetTag().GetArtifactId())
_, err = m.repo.ArtifactRepo().Get(ctx, artifactKey)
if err != nil {
m.systemMetrics.addTagFailureCounter.Inc(ctx)
return nil, err
}
- tagKey := transformers.ToTagKey(datasetID, request.Tag.Name)
+ tagKey := transformers.ToTagKey(datasetID, request.GetTag().GetName())
err = m.repo.TagRepo().Create(ctx, models.Tag{
TagKey: tagKey,
- ArtifactID: request.Tag.ArtifactId,
+ ArtifactID: request.GetTag().GetArtifactId(),
DatasetUUID: dataset.UUID,
})
if err != nil {
diff --git a/datacatalog/pkg/manager/impl/tag_manager_test.go b/datacatalog/pkg/manager/impl/tag_manager_test.go
index 98e4b41dfd..e77d3abbe1 100644
--- a/datacatalog/pkg/manager/impl/tag_manager_test.go
+++ b/datacatalog/pkg/manager/impl/tag_manager_test.go
@@ -129,7 +129,7 @@ func TestAddTag(t *testing.T) {
_, err := tagManager.AddTag(context.Background(), &datacatalog.AddTagRequest{
Tag: &datacatalog.Tag{
ArtifactId: "noArtifact",
- Dataset: getTestDataset().Id,
+ Dataset: getTestDataset().GetId(),
},
})
@@ -143,7 +143,7 @@ func TestAddTag(t *testing.T) {
_, err := tagManager.AddTag(context.Background(), &datacatalog.AddTagRequest{
Tag: &datacatalog.Tag{
Name: "noArtifact",
- Dataset: getTestDataset().Id,
+ Dataset: getTestDataset().GetId(),
},
})
diff --git a/datacatalog/pkg/manager/impl/validators/artifact_validator.go b/datacatalog/pkg/manager/impl/validators/artifact_validator.go
index d4721e1597..caf1ad3d58 100644
--- a/datacatalog/pkg/manager/impl/validators/artifact_validator.go
+++ b/datacatalog/pkg/manager/impl/validators/artifact_validator.go
@@ -18,10 +18,10 @@ func ValidateGetArtifactRequest(request *datacatalog.GetArtifactRequest) error {
return NewMissingArgumentError(fmt.Sprintf("one of %s/%s", artifactID, tagName))
}
- switch request.QueryHandle.(type) {
+ switch request.GetQueryHandle().(type) {
case *datacatalog.GetArtifactRequest_ArtifactId:
- if request.Dataset != nil {
- err := ValidateDatasetID(request.Dataset)
+ if request.GetDataset() != nil {
+ err := ValidateDatasetID(request.GetDataset())
if err != nil {
return err
}
@@ -31,7 +31,7 @@ func ValidateGetArtifactRequest(request *datacatalog.GetArtifactRequest) error {
return err
}
case *datacatalog.GetArtifactRequest_TagName:
- if err := ValidateDatasetID(request.Dataset); err != nil {
+ if err := ValidateDatasetID(request.GetDataset()); err != nil {
return err
}
@@ -58,15 +58,15 @@ func ValidateArtifact(artifact *datacatalog.Artifact) error {
return NewMissingArgumentError(artifactEntity)
}
- if err := ValidateDatasetID(artifact.Dataset); err != nil {
+ if err := ValidateDatasetID(artifact.GetDataset()); err != nil {
return err
}
- if err := ValidateEmptyStringField(artifact.Id, artifactID); err != nil {
+ if err := ValidateEmptyStringField(artifact.GetId(), artifactID); err != nil {
return err
}
- if err := ValidateEmptyArtifactData(artifact.Data); err != nil {
+ if err := ValidateEmptyArtifactData(artifact.GetData()); err != nil {
return err
}
@@ -75,16 +75,16 @@ func ValidateArtifact(artifact *datacatalog.Artifact) error {
// Validate the list request and format the request with proper defaults if not provided
func ValidateListArtifactRequest(request *datacatalog.ListArtifactsRequest) error {
- if err := ValidateDatasetID(request.Dataset); err != nil {
+ if err := ValidateDatasetID(request.GetDataset()); err != nil {
return err
}
- if err := ValidateArtifactFilterTypes(request.Filter.GetFilters()); err != nil {
+ if err := ValidateArtifactFilterTypes(request.GetFilter().GetFilters()); err != nil {
return err
}
- if request.Pagination != nil {
- err := ValidatePagination(request.Pagination)
+ if request.GetPagination() != nil {
+ err := ValidatePagination(request.GetPagination())
if err != nil {
return err
}
@@ -108,10 +108,10 @@ func ValidateUpdateArtifactRequest(request *datacatalog.UpdateArtifactRequest) e
return NewMissingArgumentError(fmt.Sprintf("one of %s/%s", artifactID, tagName))
}
- switch request.QueryHandle.(type) {
+ switch request.GetQueryHandle().(type) {
case *datacatalog.UpdateArtifactRequest_ArtifactId:
- if request.Dataset != nil {
- err := ValidateDatasetID(request.Dataset)
+ if request.GetDataset() != nil {
+ err := ValidateDatasetID(request.GetDataset())
if err != nil {
return err
}
@@ -121,7 +121,7 @@ func ValidateUpdateArtifactRequest(request *datacatalog.UpdateArtifactRequest) e
return err
}
case *datacatalog.UpdateArtifactRequest_TagName:
- if err := ValidateDatasetID(request.Dataset); err != nil {
+ if err := ValidateDatasetID(request.GetDataset()); err != nil {
return err
}
@@ -132,7 +132,7 @@ func ValidateUpdateArtifactRequest(request *datacatalog.UpdateArtifactRequest) e
return NewInvalidArgumentError("QueryHandle", "invalid type")
}
- if err := ValidateEmptyArtifactData(request.Data); err != nil {
+ if err := ValidateEmptyArtifactData(request.GetData()); err != nil {
return err
}
diff --git a/datacatalog/pkg/manager/impl/validators/dataset_validator.go b/datacatalog/pkg/manager/impl/validators/dataset_validator.go
index 5ab010517f..3cd60c57f9 100644
--- a/datacatalog/pkg/manager/impl/validators/dataset_validator.go
+++ b/datacatalog/pkg/manager/impl/validators/dataset_validator.go
@@ -18,16 +18,16 @@ func ValidateDatasetID(ds *datacatalog.DatasetID) error {
if ds == nil {
return NewMissingArgumentError(datasetEntity)
}
- if err := ValidateEmptyStringField(ds.Project, datasetProject); err != nil {
+ if err := ValidateEmptyStringField(ds.GetProject(), datasetProject); err != nil {
return err
}
- if err := ValidateEmptyStringField(ds.Domain, datasetDomain); err != nil {
+ if err := ValidateEmptyStringField(ds.GetDomain(), datasetDomain); err != nil {
return err
}
- if err := ValidateEmptyStringField(ds.Name, datasetName); err != nil {
+ if err := ValidateEmptyStringField(ds.GetName(), datasetName); err != nil {
return err
}
- if err := ValidateEmptyStringField(ds.Version, datasetVersion); err != nil {
+ if err := ValidateEmptyStringField(ds.GetVersion(), datasetVersion); err != nil {
return err
}
return nil
@@ -35,15 +35,15 @@ func ValidateDatasetID(ds *datacatalog.DatasetID) error {
// Ensure list Datasets request is properly constructed
func ValidateListDatasetsRequest(request *datacatalog.ListDatasetsRequest) error {
- if request.Pagination != nil {
- err := ValidatePagination(request.Pagination)
+ if request.GetPagination() != nil {
+ err := ValidatePagination(request.GetPagination())
if err != nil {
return err
}
}
// Datasets cannot be filtered by tag, partitions or artifacts
- for _, filter := range request.Filter.GetFilters() {
+ for _, filter := range request.GetFilter().GetFilters() {
if filter.GetTagFilter() != nil {
return NewInvalidFilterError(common.Dataset, common.Tag)
} else if filter.GetPartitionFilter() != nil {
diff --git a/datacatalog/pkg/manager/impl/validators/errors.go b/datacatalog/pkg/manager/impl/validators/errors.go
index dae123ebfd..eec033b8f9 100644
--- a/datacatalog/pkg/manager/impl/validators/errors.go
+++ b/datacatalog/pkg/manager/impl/validators/errors.go
@@ -14,13 +14,13 @@ const invalidArgFormat = "invalid value for %s, value:[%s]"
const invalidFilterFormat = "%s cannot be filtered by %s properties"
func NewMissingArgumentError(field string) error {
- return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(missingFieldFormat, field))
+ return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(missingFieldFormat, field)) //nolint
}
func NewInvalidArgumentError(field string, value string) error {
- return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(invalidArgFormat, field, value))
+ return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(invalidArgFormat, field, value)) //nolint
}
func NewInvalidFilterError(entity common.Entity, propertyEntity common.Entity) error {
- return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(invalidFilterFormat, entity, propertyEntity))
+ return errors.NewDataCatalogErrorf(codes.InvalidArgument, fmt.Sprintf(invalidFilterFormat, entity, propertyEntity)) //nolint
}
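
The `//nolint` markers in this file quiet the printf-style check that fires when an already-formatted string is passed as the format argument of `NewDataCatalogErrorf`. A minimal standalone sketch of the hazard that check guards against, using plain `fmt` and a hypothetical field value rather than the datacatalog helpers:

```go
// Hypothetical values, not from this repo: what a printf-style linter
// protects against when a pre-formatted string is reused as a format.
package main

import "fmt"

func main() {
	field := "progress%done" // a value that happens to contain '%'

	// Double formatting: the '%' inside the already-formatted string is
	// re-parsed as a verb by the outer call.
	msg := fmt.Sprintf("missing field %s", field)
	fmt.Println(fmt.Errorf(msg)) // missing field progress%!d(MISSING)one

	// Single formatting step: the value is never re-interpreted.
	fmt.Println(fmt.Errorf("missing field %s", field)) // missing field progress%done
}
```

Passing the format string and its arguments in one call, as the non-suppressed call sites in this package already do, sidesteps the problem without a suppression.
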
diff --git a/datacatalog/pkg/manager/impl/validators/pagination_validator.go b/datacatalog/pkg/manager/impl/validators/pagination_validator.go
index 7f37dbe7d5..19072bec74 100644
--- a/datacatalog/pkg/manager/impl/validators/pagination_validator.go
+++ b/datacatalog/pkg/manager/impl/validators/pagination_validator.go
@@ -27,18 +27,18 @@ func ValidateToken(token string) error {
// Validate the pagination options and set default limits
func ValidatePagination(options *datacatalog.PaginationOptions) error {
- err := ValidateToken(options.Token)
+ err := ValidateToken(options.GetToken())
if err != nil {
return err
}
- if options.SortKey != datacatalog.PaginationOptions_CREATION_TIME {
- return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid sort key %v", options.SortKey)
+ if options.GetSortKey() != datacatalog.PaginationOptions_CREATION_TIME {
+ return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid sort key %v", options.GetSortKey())
}
- if options.SortOrder != datacatalog.PaginationOptions_ASCENDING &&
- options.SortOrder != datacatalog.PaginationOptions_DESCENDING {
- return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid sort order %v", options.SortOrder)
+ if options.GetSortOrder() != datacatalog.PaginationOptions_ASCENDING &&
+ options.GetSortOrder() != datacatalog.PaginationOptions_DESCENDING {
+ return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid sort order %v", options.GetSortOrder())
}
return nil
diff --git a/datacatalog/pkg/manager/impl/validators/partition_validator.go b/datacatalog/pkg/manager/impl/validators/partition_validator.go
index 2b94e0e366..567cf300c6 100644
--- a/datacatalog/pkg/manager/impl/validators/partition_validator.go
+++ b/datacatalog/pkg/manager/impl/validators/partition_validator.go
@@ -39,15 +39,15 @@ func ValidatePartitions(datasetPartitionKeys []string, artifactPartitions []*dat
continue
}
- if err := ValidateEmptyStringField(partitionKeyName, artifactPartition.Key); err != nil {
+ if err := ValidateEmptyStringField(partitionKeyName, artifactPartition.GetKey()); err != nil {
partitionErrors = append(partitionErrors, NewMissingArgumentError(fmt.Sprintf("%v[%v]", partitionKeyName, idx)))
- } else if err := ValidateEmptyStringField(partitionValueName, artifactPartition.Value); err != nil {
+ } else if err := ValidateEmptyStringField(partitionValueName, artifactPartition.GetValue()); err != nil {
partitionErrors = append(partitionErrors, NewMissingArgumentError(fmt.Sprintf("%v[%v]", partitionValueName, idx)))
} else {
- _, ok := partitionKeyMatches[artifactPartition.Key]
+ _, ok := partitionKeyMatches[artifactPartition.GetKey()]
if ok {
- partitionKeyMatches[artifactPartition.Key] = true
+ partitionKeyMatches[artifactPartition.GetKey()] = true
} else {
keyMismatch = true
}
diff --git a/datacatalog/pkg/manager/impl/validators/tag_validator.go b/datacatalog/pkg/manager/impl/validators/tag_validator.go
index 4539ad03c2..7fda9c873d 100644
--- a/datacatalog/pkg/manager/impl/validators/tag_validator.go
+++ b/datacatalog/pkg/manager/impl/validators/tag_validator.go
@@ -13,15 +13,15 @@ func ValidateTag(tag *datacatalog.Tag) error {
if tag == nil {
return NewMissingArgumentError(tagEntity)
}
- if err := ValidateDatasetID(tag.Dataset); err != nil {
+ if err := ValidateDatasetID(tag.GetDataset()); err != nil {
return err
}
- if err := ValidateEmptyStringField(tag.Name, tagName); err != nil {
+ if err := ValidateEmptyStringField(tag.GetName(), tagName); err != nil {
return err
}
- if err := ValidateEmptyStringField(tag.ArtifactId, artifactID); err != nil {
+ if err := ValidateEmptyStringField(tag.GetArtifactId(), artifactID); err != nil {
return err
}
return nil
diff --git a/datacatalog/pkg/repositories/errors/postgres.go b/datacatalog/pkg/repositories/errors/postgres.go
index 2ab8a2895c..31e1c253d6 100644
--- a/datacatalog/pkg/repositories/errors/postgres.go
+++ b/datacatalog/pkg/repositories/errors/postgres.go
@@ -62,7 +62,7 @@ func (p *postgresErrorTransformer) ToDataCatalogError(err error) error {
case undefinedTable:
return catalogErrors.NewDataCatalogErrorf(codes.InvalidArgument, unsupportedTableOperation, pqError.Message)
default:
- return catalogErrors.NewDataCatalogErrorf(codes.Unknown, fmt.Sprintf(defaultPgError, pqError.Code, pqError.Message))
+ return catalogErrors.NewDataCatalogErrorf(codes.Unknown, fmt.Sprintf(defaultPgError, pqError.Code, pqError.Message)) //nolint
}
}
diff --git a/datacatalog/pkg/repositories/transformers/artifact.go b/datacatalog/pkg/repositories/transformers/artifact.go
index 57890ef4b1..c962fd5ce1 100644
--- a/datacatalog/pkg/repositories/transformers/artifact.go
+++ b/datacatalog/pkg/repositories/transformers/artifact.go
@@ -18,29 +18,29 @@ func SerializedMetadata(metadata *datacatalog.Metadata) ([]byte, error) {
}
func CreateArtifactModel(request *datacatalog.CreateArtifactRequest, artifactData []models.ArtifactData, dataset models.Dataset) (models.Artifact, error) {
- datasetID := request.Artifact.Dataset
+ datasetID := request.GetArtifact().GetDataset()
- serializedMetadata, err := marshalMetadata(request.Artifact.Metadata)
+ serializedMetadata, err := marshalMetadata(request.GetArtifact().GetMetadata())
if err != nil {
return models.Artifact{}, err
}
- partitions := make([]models.Partition, len(request.Artifact.Partitions))
- for i, partition := range request.Artifact.GetPartitions() {
+ partitions := make([]models.Partition, len(request.GetArtifact().GetPartitions()))
+ for i, partition := range request.GetArtifact().GetPartitions() {
partitions[i] = models.Partition{
DatasetUUID: dataset.UUID,
- Key: partition.Key,
- Value: partition.Value,
+ Key: partition.GetKey(),
+ Value: partition.GetValue(),
}
}
return models.Artifact{
ArtifactKey: models.ArtifactKey{
- DatasetProject: datasetID.Project,
- DatasetDomain: datasetID.Domain,
- DatasetName: datasetID.Name,
- DatasetVersion: datasetID.Version,
- ArtifactID: request.Artifact.Id,
+ DatasetProject: datasetID.GetProject(),
+ DatasetDomain: datasetID.GetDomain(),
+ DatasetName: datasetID.GetName(),
+ DatasetVersion: datasetID.GetVersion(),
+ ArtifactID: request.GetArtifact().GetId(),
},
DatasetUUID: dataset.UUID,
ArtifactData: artifactData,
@@ -112,10 +112,10 @@ func ToArtifactKey(datasetID *datacatalog.DatasetID, artifactID string) models.A
ArtifactID: artifactID,
}
if datasetID != nil {
- artifactKey.DatasetProject = datasetID.Project
- artifactKey.DatasetDomain = datasetID.Domain
- artifactKey.DatasetName = datasetID.Name
- artifactKey.DatasetVersion = datasetID.Version
+ artifactKey.DatasetProject = datasetID.GetProject()
+ artifactKey.DatasetDomain = datasetID.GetDomain()
+ artifactKey.DatasetName = datasetID.GetName()
+ artifactKey.DatasetVersion = datasetID.GetVersion()
}
return artifactKey
}
diff --git a/datacatalog/pkg/repositories/transformers/artifact_test.go b/datacatalog/pkg/repositories/transformers/artifact_test.go
index 350a2396aa..5c556fcabb 100644
--- a/datacatalog/pkg/repositories/transformers/artifact_test.go
+++ b/datacatalog/pkg/repositories/transformers/artifact_test.go
@@ -50,11 +50,11 @@ func getTestTags() []models.Tag {
func getDatasetModel() models.Dataset {
return models.Dataset{
DatasetKey: models.DatasetKey{
- Project: datasetID.Project,
- Domain: datasetID.Domain,
- Name: datasetID.Name,
- Version: datasetID.Version,
- UUID: datasetID.UUID,
+ Project: datasetID.GetProject(),
+ Domain: datasetID.GetDomain(),
+ Name: datasetID.GetName(),
+ Version: datasetID.GetVersion(),
+ UUID: datasetID.GetUUID(),
},
}
}
@@ -81,11 +81,11 @@ func TestCreateArtifactModel(t *testing.T) {
artifactModel, err := CreateArtifactModel(createArtifactRequest, testArtifactData, getDatasetModel())
assert.NoError(t, err)
- assert.Equal(t, artifactModel.ArtifactID, createArtifactRequest.Artifact.Id)
- assert.Equal(t, artifactModel.ArtifactKey.DatasetProject, datasetID.Project)
- assert.Equal(t, artifactModel.ArtifactKey.DatasetDomain, datasetID.Domain)
- assert.Equal(t, artifactModel.ArtifactKey.DatasetName, datasetID.Name)
- assert.Equal(t, artifactModel.ArtifactKey.DatasetVersion, datasetID.Version)
+ assert.Equal(t, artifactModel.ArtifactID, createArtifactRequest.GetArtifact().GetId())
+ assert.Equal(t, artifactModel.ArtifactKey.DatasetProject, datasetID.GetProject())
+ assert.Equal(t, artifactModel.ArtifactKey.DatasetDomain, datasetID.GetDomain())
+ assert.Equal(t, artifactModel.ArtifactKey.DatasetName, datasetID.GetName())
+ assert.Equal(t, artifactModel.ArtifactKey.DatasetVersion, datasetID.GetVersion())
assert.EqualValues(t, testArtifactData, artifactModel.ArtifactData)
assert.EqualValues(t, getTestPartitions(), artifactModel.Partitions)
}
@@ -130,32 +130,32 @@ func TestFromArtifactModel(t *testing.T) {
actual, err := FromArtifactModel(artifactModel)
assert.NoError(t, err)
- assert.Equal(t, artifactModel.ArtifactID, actual.Id)
- assert.Equal(t, artifactModel.DatasetProject, actual.Dataset.Project)
- assert.Equal(t, artifactModel.DatasetDomain, actual.Dataset.Domain)
- assert.Equal(t, artifactModel.DatasetName, actual.Dataset.Name)
- assert.Equal(t, artifactModel.DatasetVersion, actual.Dataset.Version)
+ assert.Equal(t, artifactModel.ArtifactID, actual.GetId())
+ assert.Equal(t, artifactModel.DatasetProject, actual.GetDataset().GetProject())
+ assert.Equal(t, artifactModel.DatasetDomain, actual.GetDataset().GetDomain())
+ assert.Equal(t, artifactModel.DatasetName, actual.GetDataset().GetName())
+ assert.Equal(t, artifactModel.DatasetVersion, actual.GetDataset().GetVersion())
- assert.Len(t, actual.Partitions, 2)
- assert.EqualValues(t, artifactModel.Partitions[0].Key, actual.Partitions[0].Key)
- assert.EqualValues(t, artifactModel.Partitions[0].Value, actual.Partitions[0].Value)
- assert.EqualValues(t, artifactModel.Partitions[1].Value, actual.Partitions[1].Value)
- assert.EqualValues(t, artifactModel.Partitions[1].Value, actual.Partitions[1].Value)
+ assert.Len(t, actual.GetPartitions(), 2)
+ assert.EqualValues(t, artifactModel.Partitions[0].Key, actual.GetPartitions()[0].GetKey())
+ assert.EqualValues(t, artifactModel.Partitions[0].Value, actual.GetPartitions()[0].GetValue())
+ assert.EqualValues(t, artifactModel.Partitions[1].Value, actual.GetPartitions()[1].GetValue())
+ assert.EqualValues(t, artifactModel.Partitions[1].Value, actual.GetPartitions()[1].GetValue())
- assert.Len(t, actual.Tags, 1)
- assert.EqualValues(t, artifactModel.Tags[0].TagName, actual.Tags[0].Name)
+ assert.Len(t, actual.GetTags(), 1)
+ assert.EqualValues(t, artifactModel.Tags[0].TagName, actual.GetTags()[0].GetName())
timestampProto, err := ptypes.TimestampProto(createdAt)
assert.NoError(t, err)
- assert.Equal(t, actual.CreatedAt, timestampProto)
+ assert.Equal(t, actual.GetCreatedAt(), timestampProto)
}
func TestToArtifactKey(t *testing.T) {
artifactKey := ToArtifactKey(datasetID, "artifactID-1")
- assert.Equal(t, datasetID.Project, artifactKey.DatasetProject)
- assert.Equal(t, datasetID.Domain, artifactKey.DatasetDomain)
- assert.Equal(t, datasetID.Name, artifactKey.DatasetName)
- assert.Equal(t, datasetID.Version, artifactKey.DatasetVersion)
+ assert.Equal(t, datasetID.GetProject(), artifactKey.DatasetProject)
+ assert.Equal(t, datasetID.GetDomain(), artifactKey.DatasetDomain)
+ assert.Equal(t, datasetID.GetName(), artifactKey.DatasetName)
+ assert.Equal(t, datasetID.GetVersion(), artifactKey.DatasetVersion)
assert.Equal(t, artifactKey.ArtifactID, "artifactID-1")
}
diff --git a/datacatalog/pkg/repositories/transformers/dataset.go b/datacatalog/pkg/repositories/transformers/dataset.go
index 9d5cb168a4..bc8f86e4be 100644
--- a/datacatalog/pkg/repositories/transformers/dataset.go
+++ b/datacatalog/pkg/repositories/transformers/dataset.go
@@ -7,12 +7,12 @@ import (
// Create a dataset model from the Dataset api object. This will serialize the metadata in the dataset as part of the transform
func CreateDatasetModel(dataset *datacatalog.Dataset) (*models.Dataset, error) {
- serializedMetadata, err := marshalMetadata(dataset.Metadata)
+ serializedMetadata, err := marshalMetadata(dataset.GetMetadata())
if err != nil {
return nil, err
}
- partitionKeys := make([]models.PartitionKey, len(dataset.PartitionKeys))
+ partitionKeys := make([]models.PartitionKey, len(dataset.GetPartitionKeys()))
for i, partitionKey := range dataset.GetPartitionKeys() {
partitionKeys[i] = models.PartitionKey{
@@ -22,11 +22,11 @@ func CreateDatasetModel(dataset *datacatalog.Dataset) (*models.Dataset, error) {
return &models.Dataset{
DatasetKey: models.DatasetKey{
- Project: dataset.Id.Project,
- Domain: dataset.Id.Domain,
- Name: dataset.Id.Name,
- Version: dataset.Id.Version,
- UUID: dataset.Id.UUID,
+ Project: dataset.GetId().GetProject(),
+ Domain: dataset.GetId().GetDomain(),
+ Name: dataset.GetId().GetName(),
+ Version: dataset.GetId().GetVersion(),
+ UUID: dataset.GetId().GetUUID(),
},
SerializedMetadata: serializedMetadata,
PartitionKeys: partitionKeys,
@@ -36,11 +36,11 @@ func CreateDatasetModel(dataset *datacatalog.Dataset) (*models.Dataset, error) {
// Create a dataset ID from the dataset key model
func FromDatasetID(datasetID *datacatalog.DatasetID) models.DatasetKey {
return models.DatasetKey{
- Project: datasetID.Project,
- Domain: datasetID.Domain,
- Name: datasetID.Name,
- Version: datasetID.Version,
- UUID: datasetID.UUID,
+ Project: datasetID.GetProject(),
+ Domain: datasetID.GetDomain(),
+ Name: datasetID.GetName(),
+ Version: datasetID.GetVersion(),
+ UUID: datasetID.GetUUID(),
}
}
diff --git a/datacatalog/pkg/repositories/transformers/dataset_test.go b/datacatalog/pkg/repositories/transformers/dataset_test.go
index 25062cf264..39e0e7ec3d 100644
--- a/datacatalog/pkg/repositories/transformers/dataset_test.go
+++ b/datacatalog/pkg/repositories/transformers/dataset_test.go
@@ -25,11 +25,11 @@ var datasetID = &datacatalog.DatasetID{
}
func assertDatasetIDEqualsModel(t *testing.T, idlDataset *datacatalog.DatasetID, model *models.DatasetKey) {
- assert.Equal(t, idlDataset.Project, model.Project)
- assert.Equal(t, idlDataset.Domain, model.Domain)
- assert.Equal(t, idlDataset.Name, model.Name)
- assert.Equal(t, idlDataset.Version, model.Version)
- assert.Equal(t, idlDataset.UUID, model.UUID)
+ assert.Equal(t, idlDataset.GetProject(), model.Project)
+ assert.Equal(t, idlDataset.GetDomain(), model.Domain)
+ assert.Equal(t, idlDataset.GetName(), model.Name)
+ assert.Equal(t, idlDataset.GetVersion(), model.Version)
+ assert.Equal(t, idlDataset.GetUUID(), model.UUID)
}
func TestCreateDatasetModelNoParitions(t *testing.T) {
@@ -40,11 +40,11 @@ func TestCreateDatasetModelNoParitions(t *testing.T) {
datasetModel, err := CreateDatasetModel(dataset)
assert.NoError(t, err)
- assertDatasetIDEqualsModel(t, dataset.Id, &datasetModel.DatasetKey)
+ assertDatasetIDEqualsModel(t, dataset.GetId(), &datasetModel.DatasetKey)
unmarshaledMetadata, err := unmarshalMetadata(datasetModel.SerializedMetadata)
assert.NoError(t, err)
- assert.EqualValues(t, unmarshaledMetadata.KeyMap, metadata.KeyMap)
+ assert.EqualValues(t, unmarshaledMetadata.GetKeyMap(), metadata.GetKeyMap())
assert.Len(t, datasetModel.PartitionKeys, 0)
}
@@ -58,15 +58,15 @@ func TestCreateDatasetModel(t *testing.T) {
datasetModel, err := CreateDatasetModel(dataset)
assert.NoError(t, err)
- assertDatasetIDEqualsModel(t, dataset.Id, &datasetModel.DatasetKey)
+ assertDatasetIDEqualsModel(t, dataset.GetId(), &datasetModel.DatasetKey)
unmarshaledMetadata, err := unmarshalMetadata(datasetModel.SerializedMetadata)
assert.NoError(t, err)
- assert.EqualValues(t, unmarshaledMetadata.KeyMap, metadata.KeyMap)
+ assert.EqualValues(t, unmarshaledMetadata.GetKeyMap(), metadata.GetKeyMap())
assert.Len(t, datasetModel.PartitionKeys, 2)
- assert.Equal(t, datasetModel.PartitionKeys[0], models.PartitionKey{Name: dataset.PartitionKeys[0]})
- assert.Equal(t, datasetModel.PartitionKeys[1], models.PartitionKey{Name: dataset.PartitionKeys[1]})
+ assert.Equal(t, datasetModel.PartitionKeys[0], models.PartitionKey{Name: dataset.GetPartitionKeys()[0]})
+ assert.Equal(t, datasetModel.PartitionKeys[1], models.PartitionKey{Name: dataset.GetPartitionKeys()[1]})
}
func TestFromDatasetID(t *testing.T) {
@@ -86,9 +86,9 @@ func TestFromDatasetModelNoPartitionsOrMetadata(t *testing.T) {
}
dataset, err := FromDatasetModel(*datasetModel)
assert.NoError(t, err)
- assertDatasetIDEqualsModel(t, dataset.Id, &datasetModel.DatasetKey)
- assert.Len(t, dataset.Metadata.KeyMap, 0)
- assert.Len(t, dataset.PartitionKeys, 0)
+ assertDatasetIDEqualsModel(t, dataset.GetId(), &datasetModel.DatasetKey)
+ assert.Len(t, dataset.GetMetadata().GetKeyMap(), 0)
+ assert.Len(t, dataset.GetPartitionKeys(), 0)
}
func TestFromDatasetModelWithPartitions(t *testing.T) {
@@ -108,8 +108,8 @@ func TestFromDatasetModelWithPartitions(t *testing.T) {
}
dataset, err := FromDatasetModel(*datasetModel)
assert.NoError(t, err)
- assertDatasetIDEqualsModel(t, dataset.Id, &datasetModel.DatasetKey)
- assert.Len(t, dataset.Metadata.KeyMap, 2)
- assert.EqualValues(t, dataset.Metadata.KeyMap, metadata.KeyMap)
- assert.Len(t, dataset.PartitionKeys, 2)
+ assertDatasetIDEqualsModel(t, dataset.GetId(), &datasetModel.DatasetKey)
+ assert.Len(t, dataset.GetMetadata().GetKeyMap(), 2)
+ assert.EqualValues(t, dataset.GetMetadata().GetKeyMap(), metadata.GetKeyMap())
+ assert.Len(t, dataset.GetPartitionKeys(), 2)
}
diff --git a/datacatalog/pkg/repositories/transformers/filters.go b/datacatalog/pkg/repositories/transformers/filters.go
index c4ed8b6f08..0c6f083ee4 100644
--- a/datacatalog/pkg/repositories/transformers/filters.go
+++ b/datacatalog/pkg/repositories/transformers/filters.go
@@ -44,7 +44,7 @@ func FilterToListInput(ctx context.Context, sourceEntity common.Entity, filterEx
}
func constructModelFilter(ctx context.Context, singleFilter *datacatalog.SinglePropertyFilter, sourceEntity common.Entity) (models.ModelFilter, error) {
- operator := comparisonOperatorMap[singleFilter.Operator]
+ operator := comparisonOperatorMap[singleFilter.GetOperator()]
var modelFilter models.ModelFilter
switch propertyFilter := singleFilter.GetPropertyFilter().(type) {
@@ -53,8 +53,8 @@ func constructModelFilter(ctx context.Context, singleFilter *datacatalog.SingleP
switch partitionProperty := partitionPropertyFilter.GetProperty().(type) {
case *datacatalog.PartitionPropertyFilter_KeyVal:
- key := partitionProperty.KeyVal.Key
- value := partitionProperty.KeyVal.Value
+ key := partitionProperty.KeyVal.GetKey()
+ value := partitionProperty.KeyVal.GetValue()
logger.Debugf(ctx, "Constructing partition key:[%v], val:[%v] filter", key, value)
if err := validators.ValidateEmptyStringField(key, "PartitionKey"); err != nil {
diff --git a/datacatalog/pkg/repositories/transformers/pagination.go b/datacatalog/pkg/repositories/transformers/pagination.go
index 793779ab46..ed7a7925c1 100644
--- a/datacatalog/pkg/repositories/transformers/pagination.go
+++ b/datacatalog/pkg/repositories/transformers/pagination.go
@@ -23,18 +23,18 @@ func ApplyPagination(paginationOpts *datacatalog.PaginationOptions, input *model
if paginationOpts != nil {
// if the token is empty, that is still valid input since it is optional
- if len(strings.Trim(paginationOpts.Token, " ")) == 0 {
+ if len(strings.Trim(paginationOpts.GetToken(), " ")) == 0 {
offset = common.DefaultPageOffset
} else {
- parsedOffset, err := strconv.ParseInt(paginationOpts.Token, 10, 32)
+ parsedOffset, err := strconv.ParseInt(paginationOpts.GetToken(), 10, 32)
if err != nil {
return errors.NewDataCatalogErrorf(codes.InvalidArgument, "Invalid token %v", offset)
}
offset = int(parsedOffset)
}
- limit = int(paginationOpts.Limit)
- sortKey = paginationOpts.SortKey
- sortOrder = paginationOpts.SortOrder
+ limit = int(paginationOpts.GetLimit())
+ sortKey = paginationOpts.GetSortKey()
+ sortOrder = paginationOpts.GetSortOrder()
}
input.Offset = offset
diff --git a/datacatalog/pkg/repositories/transformers/reservation.go b/datacatalog/pkg/repositories/transformers/reservation.go
index 2ae215be82..11edeb4f26 100644
--- a/datacatalog/pkg/repositories/transformers/reservation.go
+++ b/datacatalog/pkg/repositories/transformers/reservation.go
@@ -12,14 +12,14 @@ import (
)
func FromReservationID(reservationID *datacatalog.ReservationID) models.ReservationKey {
- datasetID := reservationID.DatasetId
+ datasetID := reservationID.GetDatasetId()
return models.ReservationKey{
- DatasetProject: datasetID.Project,
- DatasetDomain: datasetID.Domain,
- DatasetName: datasetID.Name,
- DatasetVersion: datasetID.Version,
- TagName: reservationID.TagName,
+ DatasetProject: datasetID.GetProject(),
+ DatasetDomain: datasetID.GetDomain(),
+ DatasetName: datasetID.GetName(),
+ DatasetVersion: datasetID.GetVersion(),
+ TagName: reservationID.GetTagName(),
}
}
diff --git a/datacatalog/pkg/repositories/transformers/reservation_test.go b/datacatalog/pkg/repositories/transformers/reservation_test.go
index 95ca7795ce..21b8e896fc 100644
--- a/datacatalog/pkg/repositories/transformers/reservation_test.go
+++ b/datacatalog/pkg/repositories/transformers/reservation_test.go
@@ -22,11 +22,11 @@ func TestFromReservationID(t *testing.T) {
}
reservationKey := FromReservationID(&reservationID)
- assert.Equal(t, reservationKey.DatasetProject, reservationID.DatasetId.Project)
- assert.Equal(t, reservationKey.DatasetName, reservationID.DatasetId.Name)
- assert.Equal(t, reservationKey.DatasetDomain, reservationID.DatasetId.Domain)
- assert.Equal(t, reservationKey.DatasetVersion, reservationID.DatasetId.Version)
- assert.Equal(t, reservationKey.TagName, reservationID.TagName)
+ assert.Equal(t, reservationKey.DatasetProject, reservationID.GetDatasetId().GetProject())
+ assert.Equal(t, reservationKey.DatasetName, reservationID.GetDatasetId().GetName())
+ assert.Equal(t, reservationKey.DatasetDomain, reservationID.GetDatasetId().GetDomain())
+ assert.Equal(t, reservationKey.DatasetVersion, reservationID.GetDatasetId().GetVersion())
+ assert.Equal(t, reservationKey.TagName, reservationID.GetTagName())
}
func TestCreateReservation(t *testing.T) {
@@ -47,16 +47,16 @@ func TestCreateReservation(t *testing.T) {
reservation, err := CreateReservation(&modelReservation, heartbeatInterval)
assert.Equal(t, err, nil)
- assert.Equal(t, reservation.ExpiresAt.AsTime(), modelReservation.ExpiresAt.UTC())
- assert.Equal(t, reservation.HeartbeatInterval.AsDuration(), heartbeatInterval)
- assert.Equal(t, reservation.OwnerId, modelReservation.OwnerID)
-
- reservationID := reservation.ReservationId
- assert.Equal(t, reservationID.TagName, modelReservation.TagName)
-
- datasetID := reservationID.DatasetId
- assert.Equal(t, datasetID.Project, modelReservation.DatasetProject)
- assert.Equal(t, datasetID.Name, modelReservation.DatasetName)
- assert.Equal(t, datasetID.Domain, modelReservation.DatasetDomain)
- assert.Equal(t, datasetID.Version, modelReservation.DatasetVersion)
+ assert.Equal(t, reservation.GetExpiresAt().AsTime(), modelReservation.ExpiresAt.UTC())
+ assert.Equal(t, reservation.GetHeartbeatInterval().AsDuration(), heartbeatInterval)
+ assert.Equal(t, reservation.GetOwnerId(), modelReservation.OwnerID)
+
+ reservationID := reservation.GetReservationId()
+ assert.Equal(t, reservationID.GetTagName(), modelReservation.TagName)
+
+ datasetID := reservationID.GetDatasetId()
+ assert.Equal(t, datasetID.GetProject(), modelReservation.DatasetProject)
+ assert.Equal(t, datasetID.GetName(), modelReservation.DatasetName)
+ assert.Equal(t, datasetID.GetDomain(), modelReservation.DatasetDomain)
+ assert.Equal(t, datasetID.GetVersion(), modelReservation.DatasetVersion)
}
diff --git a/datacatalog/pkg/repositories/transformers/tag.go b/datacatalog/pkg/repositories/transformers/tag.go
index df98e22200..7fe1f83220 100644
--- a/datacatalog/pkg/repositories/transformers/tag.go
+++ b/datacatalog/pkg/repositories/transformers/tag.go
@@ -7,10 +7,10 @@ import (
func ToTagKey(datasetID *datacatalog.DatasetID, tagName string) models.TagKey {
return models.TagKey{
- DatasetProject: datasetID.Project,
- DatasetDomain: datasetID.Domain,
- DatasetName: datasetID.Name,
- DatasetVersion: datasetID.Version,
+ DatasetProject: datasetID.GetProject(),
+ DatasetDomain: datasetID.GetDomain(),
+ DatasetName: datasetID.GetName(),
+ DatasetVersion: datasetID.GetVersion(),
TagName: tagName,
}
}
diff --git a/datacatalog/pkg/repositories/transformers/tag_test.go b/datacatalog/pkg/repositories/transformers/tag_test.go
index c2820f6260..f77af243d6 100644
--- a/datacatalog/pkg/repositories/transformers/tag_test.go
+++ b/datacatalog/pkg/repositories/transformers/tag_test.go
@@ -22,10 +22,10 @@ func TestToTagKey(t *testing.T) {
tagKey := ToTagKey(datasetID, tagName)
assert.Equal(t, tagName, tagKey.TagName)
- assert.Equal(t, datasetID.Project, tagKey.DatasetProject)
- assert.Equal(t, datasetID.Domain, tagKey.DatasetDomain)
- assert.Equal(t, datasetID.Name, tagKey.DatasetName)
- assert.Equal(t, datasetID.Version, tagKey.DatasetVersion)
+ assert.Equal(t, datasetID.GetProject(), tagKey.DatasetProject)
+ assert.Equal(t, datasetID.GetDomain(), tagKey.DatasetDomain)
+ assert.Equal(t, datasetID.GetName(), tagKey.DatasetName)
+ assert.Equal(t, datasetID.GetVersion(), tagKey.DatasetVersion)
}
func TestFromTagModel(t *testing.T) {
@@ -46,10 +46,10 @@ func TestFromTagModel(t *testing.T) {
tag := FromTagModel(datasetID, tagModel)
- assert.Equal(t, tag.Name, tagModel.TagName)
- assert.Equal(t, datasetID.Project, tag.Dataset.Project)
- assert.Equal(t, datasetID.Domain, tag.Dataset.Domain)
- assert.Equal(t, datasetID.Name, tag.Dataset.Name)
- assert.Equal(t, datasetID.Version, tag.Dataset.Version)
- assert.Equal(t, datasetID.UUID, tag.Dataset.UUID)
+ assert.Equal(t, tag.GetName(), tagModel.TagName)
+ assert.Equal(t, datasetID.GetProject(), tag.GetDataset().GetProject())
+ assert.Equal(t, datasetID.GetDomain(), tag.GetDataset().GetDomain())
+ assert.Equal(t, datasetID.GetName(), tag.GetDataset().GetName())
+ assert.Equal(t, datasetID.GetVersion(), tag.GetDataset().GetVersion())
+ assert.Equal(t, datasetID.GetUUID(), tag.GetDataset().GetUUID())
}
diff --git a/datacatalog/pkg/repositories/transformers/util_test.go b/datacatalog/pkg/repositories/transformers/util_test.go
index bdbd1c642c..1d0c666e82 100644
--- a/datacatalog/pkg/repositories/transformers/util_test.go
+++ b/datacatalog/pkg/repositories/transformers/util_test.go
@@ -12,7 +12,7 @@ func TestMarshaling(t *testing.T) {
unmarshaledMetadata, err := unmarshalMetadata(marshaledMetadata)
assert.NoError(t, err)
- assert.EqualValues(t, unmarshaledMetadata.KeyMap, metadata.KeyMap)
+ assert.EqualValues(t, unmarshaledMetadata.GetKeyMap(), metadata.GetKeyMap())
}
func TestMarshalingWithNil(t *testing.T) {
@@ -21,5 +21,5 @@ func TestMarshalingWithNil(t *testing.T) {
var expectedKeymap map[string]string
unmarshaledMetadata, err := unmarshalMetadata(marshaledMetadata)
assert.NoError(t, err)
- assert.EqualValues(t, expectedKeymap, unmarshaledMetadata.KeyMap)
+ assert.EqualValues(t, expectedKeymap, unmarshaledMetadata.GetKeyMap())
}
diff --git a/flyteadmin/.golangci.yml b/flyteadmin/.golangci.yml
index cd180b89d1..b3e4f05997 100644
--- a/flyteadmin/.golangci.yml
+++ b/flyteadmin/.golangci.yml
@@ -3,30 +3,25 @@ run:
# because we're skipping TLS verification - for now
- cmd/entrypoints/serve.go
- pkg/async/messages/sqs.go
-
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- - gas
+ - gosec
- gci
- goconst
- goimports
- - golint
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- staticcheck
- - structcheck
- typecheck
- unconvert
- unparam
- unused
- - varcheck
-
+ - protogetter
linters-settings:
gci:
custom-order: true
@@ -35,6 +30,8 @@ linters-settings:
- default
- prefix(github.com/flyteorg)
skip-generated: true
+ goconst:
+ ignore-tests: true
issues:
exclude-rules:
- path: pkg/workflowengine/impl/prepare_execution.go
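
The linter list swaps `gas` for its successor `gosec`, drops checks that golangci-lint has since deprecated (`deadcode`, `golint`, `structcheck`, `varcheck`), and enables `protogetter`, which is what drives the `.Field` → `.GetField()` rewrites across the Go files in this change. A minimal sketch of the nil-safety those generated getters provide, using a hand-written stand-in for a proto message rather than Flyte's generated code:

```go
// Hand-written stand-in for a generated proto message, to show the nil
// check that generated Get* methods carry.
package main

import "fmt"

type DatasetID struct {
	Project string
}

// Generated getters guard against a nil receiver and return the zero value.
func (d *DatasetID) GetProject() string {
	if d == nil {
		return ""
	}
	return d.Project
}

func main() {
	var id *DatasetID // an unset optional message field is a nil pointer

	fmt.Println(id.GetProject() == "") // true, no panic
	// fmt.Println(id.Project)         // would panic: nil pointer dereference
}
```

Because optional message fields are pointers that may legitimately be nil, chained getters such as `request.GetArtifact().GetDataset()` stay panic-free where direct field access would not.
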
diff --git a/flyteadmin/auth/authzserver/claims_verifier_test.go b/flyteadmin/auth/authzserver/claims_verifier_test.go
index dca3cf6e22..568b248ccd 100644
--- a/flyteadmin/auth/authzserver/claims_verifier_test.go
+++ b/flyteadmin/auth/authzserver/claims_verifier_test.go
@@ -30,7 +30,7 @@ func Test_verifyClaims(t *testing.T) {
assert.Equal(t, "my-client", identityCtx.AppID())
assert.Equal(t, "123", identityCtx.UserID())
assert.Equal(t, "https://myserver", identityCtx.Audience())
- assert.Equal(t, "byhsu@linkedin.com", identityCtx.UserInfo().Email)
+ assert.Equal(t, "byhsu@linkedin.com", identityCtx.UserInfo().GetEmail())
})
t.Run("Multiple audience", func(t *testing.T) {
diff --git a/flyteadmin/auth/authzserver/metadata_provider_test.go b/flyteadmin/auth/authzserver/metadata_provider_test.go
index c8f92fe8cc..c02825360f 100644
--- a/flyteadmin/auth/authzserver/metadata_provider_test.go
+++ b/flyteadmin/auth/authzserver/metadata_provider_test.go
@@ -35,10 +35,10 @@ func TestOAuth2MetadataProvider_FlyteClient(t *testing.T) {
ctx := context.Background()
resp, err := provider.GetPublicClientConfig(ctx, &service.PublicClientAuthConfigRequest{})
assert.NoError(t, err)
- assert.Equal(t, "my-client", resp.ClientId)
- assert.Equal(t, "client/", resp.RedirectUri)
- assert.Equal(t, []string{"all"}, resp.Scopes)
- assert.Equal(t, "http://dummyServer", resp.Audience)
+ assert.Equal(t, "my-client", resp.GetClientId())
+ assert.Equal(t, "client/", resp.GetRedirectUri())
+ assert.Equal(t, []string{"all"}, resp.GetScopes())
+ assert.Equal(t, "http://dummyServer", resp.GetAudience())
}
func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) {
@@ -50,7 +50,7 @@ func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) {
ctx := context.Background()
resp, err := provider.GetOAuth2Metadata(ctx, &service.OAuth2MetadataRequest{})
assert.NoError(t, err)
- assert.Equal(t, "https://issuer/", resp.Issuer)
+ assert.Equal(t, "https://issuer/", resp.GetIssuer())
})
var issuer string
@@ -91,7 +91,7 @@ func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) {
ctx := context.Background()
resp, err := provider.GetOAuth2Metadata(ctx, &service.OAuth2MetadataRequest{})
assert.NoError(t, err)
- assert.Equal(t, "https://dev-14186422.okta.com", resp.Issuer)
+ assert.Equal(t, "https://dev-14186422.okta.com", resp.GetIssuer())
})
t.Run("External AuthServer fallback url", func(t *testing.T) {
@@ -110,7 +110,7 @@ func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) {
ctx := context.Background()
resp, err := provider.GetOAuth2Metadata(ctx, &service.OAuth2MetadataRequest{})
assert.NoError(t, err)
- assert.Equal(t, "https://dev-14186422.okta.com", resp.Issuer)
+ assert.Equal(t, "https://dev-14186422.okta.com", resp.GetIssuer())
})
}
diff --git a/flyteadmin/auth/authzserver/resource_server.go b/flyteadmin/auth/authzserver/resource_server.go
index 59b984365d..917e0fcfaf 100644
--- a/flyteadmin/auth/authzserver/resource_server.go
+++ b/flyteadmin/auth/authzserver/resource_server.go
@@ -116,7 +116,7 @@ func getJwksForIssuer(ctx context.Context, issuerBaseURL url.URL, cfg authConfig
return nil, fmt.Errorf("failed to decode provider discovery object: %v", err)
}
- return oidc.NewRemoteKeySet(oidc.ClientContext(ctx, httpClient), p.JwksUri), nil
+ return oidc.NewRemoteKeySet(oidc.ClientContext(ctx, httpClient), p.GetJwksUri()), nil
}
// NewOAuth2ResourceServer initializes a new OAuth2ResourceServer.
diff --git a/flyteadmin/auth/authzserver/resource_server_test.go b/flyteadmin/auth/authzserver/resource_server_test.go
index 6ee94184ed..9541e73ccd 100644
--- a/flyteadmin/auth/authzserver/resource_server_test.go
+++ b/flyteadmin/auth/authzserver/resource_server_test.go
@@ -61,7 +61,7 @@ func newMockResourceServer(t testing.TB, publicKey rsa.PublicKey) (resourceServe
}
w.Header().Set("Content-Type", "application/json")
- _, err = io.WriteString(w, string(raw))
+ _, err = io.Writer.Write(w, raw)
if !assert.NoError(t, err) {
t.FailNow()
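
Writing `raw` directly avoids the `[]byte` → `string` → `[]byte` round trip that `io.WriteString(w, string(raw))` forced; `io.Writer.Write` is a method expression, so the call is equivalent to `w.Write(raw)`. A small sketch of that equivalence against a hypothetical in-memory buffer:

```go
// Hypothetical in-memory writer; both calls write the same bytes.
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	raw := []byte(`{"keys": []}`)
	var buf bytes.Buffer

	// Method expression: the receiver becomes the first argument.
	n1, _ := io.Writer.Write(&buf, raw)

	// Ordinary method call on the same writer.
	n2, _ := buf.Write(raw)

	fmt.Println(n1, n2, buf.Len()) // 12 12 24
}
```
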
diff --git a/flyteadmin/auth/handler_utils.go b/flyteadmin/auth/handler_utils.go
index a6b4031ca8..dd7a97d1af 100644
--- a/flyteadmin/auth/handler_utils.go
+++ b/flyteadmin/auth/handler_utils.go
@@ -168,6 +168,7 @@ func GetRedirectURLAllowed(ctx context.Context, urlRedirectParam string, cfg *co
}
logger.Debugf(ctx, "validating whether redirect url: %s is authorized", redirectURL)
for _, authorizedURI := range cfg.AuthorizedURIs {
+ authorizedURI := authorizedURI
if isAuthorizedRedirectURL(redirectURL, &authorizedURI.URL) {
logger.Debugf(ctx, "authorizing redirect url: %s against authorized uri: %s", redirectURL.String(), authorizedURI.String())
return true
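
Re-declaring `authorizedURI` inside the loop pins a per-iteration copy before `&authorizedURI.URL` is taken, which matters on Go 1.21 and earlier where a `range` variable is a single reused variable, and it also satisfies linters that flag implicit aliasing of range variables. A sketch of the difference, with a hypothetical slice of endpoints standing in for `cfg.AuthorizedURIs`:

```go
// Hypothetical slice of endpoints, standing in for cfg.AuthorizedURIs.
package main

import "fmt"

type endpoint struct{ host string }

func main() {
	endpoints := []endpoint{{"a.example"}, {"b.example"}, {"c.example"}}

	var aliased []*endpoint
	var pinned []*endpoint

	for _, e := range endpoints {
		aliased = append(aliased, &e) // Go <= 1.21: same address each iteration

		e := e // per-iteration copy, as in the handler change
		pinned = append(pinned, &e)
	}

	// On Go 1.21 and earlier the aliased slice prints "c.example" three
	// times; the pinned slice always prints a.example, b.example, c.example.
	// Go 1.22 gives each iteration its own variable, so both then match.
	for i := range endpoints {
		fmt.Println(aliased[i].host, pinned[i].host)
	}
}
```
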
diff --git a/flyteadmin/auth/handlers.go b/flyteadmin/auth/handlers.go
index d8bc626652..002744fbd1 100644
--- a/flyteadmin/auth/handlers.go
+++ b/flyteadmin/auth/handlers.go
@@ -524,8 +524,8 @@ func GetUserInfoForwardResponseHandler() UserInfoForwardResponseHandler {
return func(ctx context.Context, w http.ResponseWriter, m proto.Message) error {
info, ok := m.(*service.UserInfoResponse)
if ok {
- if info.AdditionalClaims != nil {
- for k, v := range info.AdditionalClaims.GetFields() {
+ if info.GetAdditionalClaims() != nil {
+ for k, v := range info.GetAdditionalClaims().GetFields() {
jsonBytes, err := v.MarshalJSON()
if err != nil {
logger.Warningf(ctx, "failed to marshal claim [%s] to json: %v", k, err)
@@ -535,7 +535,7 @@ func GetUserInfoForwardResponseHandler() UserInfoForwardResponseHandler {
w.Header().Set(header, string(jsonBytes))
}
}
- w.Header().Set("X-User-Subject", info.Subject)
+ w.Header().Set("X-User-Subject", info.GetSubject())
}
return nil
}
diff --git a/flyteadmin/auth/identity_context.go b/flyteadmin/auth/identity_context.go
index 05889f7537..ab30088f01 100644
--- a/flyteadmin/auth/identity_context.go
+++ b/flyteadmin/auth/identity_context.go
@@ -103,7 +103,7 @@ func NewIdentityContext(audience, userID, appID string, authenticatedAt time.Tim
userInfo = &service.UserInfoResponse{}
}
- if len(userInfo.Subject) == 0 {
+ if len(userInfo.GetSubject()) == 0 {
userInfo.Subject = userID
}
diff --git a/flyteadmin/auth/identity_context_test.go b/flyteadmin/auth/identity_context_test.go
index 1e72042be0..fb339027a9 100644
--- a/flyteadmin/auth/identity_context_test.go
+++ b/flyteadmin/auth/identity_context_test.go
@@ -22,7 +22,7 @@ func TestGetClaims(t *testing.T) {
assert.NoError(t, err)
assert.EqualValues(t, claims, withClaimsCtx.Claims())
- assert.NotEmpty(t, withClaimsCtx.UserInfo().AdditionalClaims)
+ assert.NotEmpty(t, withClaimsCtx.UserInfo().GetAdditionalClaims())
}
func TestWithExecutionUserIdentifier(t *testing.T) {
diff --git a/flyteadmin/auth/init_secrets.go b/flyteadmin/auth/init_secrets.go
index 6e3d4a3078..fa964c57e9 100644
--- a/flyteadmin/auth/init_secrets.go
+++ b/flyteadmin/auth/init_secrets.go
@@ -8,7 +8,6 @@ import (
"encoding/base64"
"encoding/pem"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
@@ -78,21 +77,21 @@ type SecretsSet struct {
}
func writeSecrets(ctx context.Context, secrets SecretsSet, path string) error {
- err := ioutil.WriteFile(filepath.Join(path, config.SecretNameClaimSymmetricKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.TokenHashKey)), os.ModePerm)
+ err := os.WriteFile(filepath.Join(path, config.SecretNameClaimSymmetricKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.TokenHashKey)), os.ModePerm) // #nosec G306
if err != nil {
return fmt.Errorf("failed to persist token hash key. Error: %w", err)
}
logger.Infof(ctx, "wrote %v", config.SecretNameClaimSymmetricKey)
- err = ioutil.WriteFile(filepath.Join(path, config.SecretNameCookieHashKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.CookieHashKey)), os.ModePerm)
+ err = os.WriteFile(filepath.Join(path, config.SecretNameCookieHashKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.CookieHashKey)), os.ModePerm) // #nosec G306
if err != nil {
return fmt.Errorf("failed to persist cookie hash key. Error: %w", err)
}
logger.Infof(ctx, "wrote %v", config.SecretNameCookieHashKey)
- err = ioutil.WriteFile(filepath.Join(path, config.SecretNameCookieBlockKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.CookieBlockKey)), os.ModePerm)
+ err = os.WriteFile(filepath.Join(path, config.SecretNameCookieBlockKey), []byte(base64.RawStdEncoding.EncodeToString(secrets.CookieBlockKey)), os.ModePerm) // #nosec G306
if err != nil {
return fmt.Errorf("failed to persist cookie block key. Error: %w", err)
}
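
`os.WriteFile` is the drop-in replacement for the deprecated `ioutil.WriteFile`, and the `// #nosec G306` markers acknowledge gosec's file-permission rule, which flags write permissions broader than 0600; these call sites keep `os.ModePerm`. A sketch of the modernized call with restrictive permissions, using a temp directory and hypothetical contents:

```go
// Temp directory and hypothetical contents; not the real secret material.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "secrets")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "token_hash_key")

	// 0600 keeps gosec's G306 check satisfied without a #nosec marker.
	if err := os.WriteFile(path, []byte("example-key-material"), 0o600); err != nil {
		panic(err)
	}

	info, err := os.Stat(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Mode().Perm()) // -rw------- on Unix-like systems
}
```
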
diff --git a/flyteadmin/dataproxy/service.go b/flyteadmin/dataproxy/service.go
index c02fa3699f..27d03e3eda 100644
--- a/flyteadmin/dataproxy/service.go
+++ b/flyteadmin/dataproxy/service.go
@@ -49,22 +49,22 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp
// If it exists, and a hash was provided, then check if it matches. If it matches, then proceed as normal otherwise fail.
// If it doesn't exist, then proceed as normal.
- if len(req.Project) == 0 || len(req.Domain) == 0 {
- logger.Infof(ctx, "project and domain are required parameters. Project [%v]. Domain [%v]", req.Project, req.Domain)
+ if len(req.GetProject()) == 0 || len(req.GetDomain()) == 0 {
+ logger.Infof(ctx, "project and domain are required parameters. Project [%v]. Domain [%v]", req.GetProject(), req.GetDomain())
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "project and domain are required parameters")
}
// At least one of the hash or manually given prefix must be provided.
- if len(req.FilenameRoot) == 0 && len(req.ContentMd5) == 0 {
- logger.Infof(ctx, "content_md5 or filename_root is a required parameter. FilenameRoot [%v], ContentMD5 [%v]", req.FilenameRoot, req.ContentMd5)
+ if len(req.GetFilenameRoot()) == 0 && len(req.GetContentMd5()) == 0 {
+ logger.Infof(ctx, "content_md5 or filename_root is a required parameter. FilenameRoot [%v], ContentMD5 [%v]", req.GetFilenameRoot(), req.GetContentMd5())
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"content_md5 or filename_root is a required parameter")
}
// If we fall in here, that means that the full path is deterministic and we should check for existence.
- if len(req.Filename) > 0 && len(req.FilenameRoot) > 0 {
+ if len(req.GetFilename()) > 0 && len(req.GetFilenameRoot()) > 0 {
knownLocation, err := createStorageLocation(ctx, s.dataStore, s.cfg.Upload,
- req.Org, req.Project, req.Domain, req.FilenameRoot, req.Filename)
+ req.GetOrg(), req.GetProject(), req.GetDomain(), req.GetFilenameRoot(), req.GetFilename())
if err != nil {
logger.Errorf(ctx, "failed to create storage location. Error %v", err)
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create storage location, Error: %v", err)
@@ -78,15 +78,15 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp
// Basically if the file exists, then error unless the user also provided a hash and it matches.
// Keep in mind this is just a best effort attempt. There can easily be race conditions where two users
// request the same file at the same time and one of the writes is lost.
- if len(req.ContentMd5) == 0 {
+ if len(req.GetContentMd5()) == 0 {
return nil, errors.NewFlyteAdminErrorf(codes.AlreadyExists, "file already exists at location [%v], specify a matching hash if you wish to rewrite", knownLocation)
}
- base64Digest := base64.StdEncoding.EncodeToString(req.ContentMd5)
+ base64Digest := base64.StdEncoding.EncodeToString(req.GetContentMd5())
if len(metadata.ContentMD5()) == 0 {
// For backward compatibility, dataproxy assumes that the Etag exists if ContentMD5 is not in the metadata.
// Data proxy won't allow people to overwrite the file if both the Etag and the ContentMD5 do not exist.
- hexDigest := hex.EncodeToString(req.ContentMd5)
- base32Digest := base32.StdEncoding.EncodeToString(req.ContentMd5)
+ hexDigest := hex.EncodeToString(req.GetContentMd5())
+ base32Digest := base32.StdEncoding.EncodeToString(req.GetContentMd5())
if hexDigest != metadata.Etag() && base32Digest != metadata.Etag() && base64Digest != metadata.Etag() {
logger.Errorf(ctx, "File already exists at location [%v] but hashes do not match", knownLocation)
return nil, errors.NewFlyteAdminErrorf(codes.AlreadyExists, "file already exists at location [%v], specify a matching hash if you wish to rewrite", knownLocation)
@@ -99,7 +99,7 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp
}
}
- if expiresIn := req.ExpiresIn; expiresIn != nil {
+ if expiresIn := req.GetExpiresIn(); expiresIn != nil {
if !expiresIn.IsValid() {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "expiresIn [%v] is invalid", expiresIn)
}
@@ -112,21 +112,21 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp
req.ExpiresIn = durationpb.New(s.cfg.Upload.MaxExpiresIn.Duration)
}
- if len(req.Filename) == 0 {
+ if len(req.GetFilename()) == 0 {
req.Filename = rand.String(s.cfg.Upload.DefaultFileNameLength)
}
- base64digestMD5 := base64.StdEncoding.EncodeToString(req.ContentMd5)
+ base64digestMD5 := base64.StdEncoding.EncodeToString(req.GetContentMd5())
var prefix string
- if len(req.FilenameRoot) > 0 {
- prefix = req.FilenameRoot
+ if len(req.GetFilenameRoot()) > 0 {
+ prefix = req.GetFilenameRoot()
} else {
// url safe base32 encoding
- prefix = base32.StdEncoding.EncodeToString(req.ContentMd5)
+ prefix = base32.StdEncoding.EncodeToString(req.GetContentMd5())
}
storagePath, err := createStorageLocation(ctx, s.dataStore, s.cfg.Upload,
- req.Org, req.Project, req.Domain, prefix, req.Filename)
+ req.GetOrg(), req.GetProject(), req.GetDomain(), prefix, req.GetFilename())
if err != nil {
logger.Errorf(ctx, "failed to create shardedStorageLocation. Error %v", err)
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create shardedStorageLocation, Error: %v", err)
@@ -134,9 +134,9 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp
resp, err := s.dataStore.CreateSignedURL(ctx, storagePath, storage.SignedURLProperties{
Scope: stow.ClientMethodPut,
- ExpiresIn: req.ExpiresIn.AsDuration(),
+ ExpiresIn: req.GetExpiresIn().AsDuration(),
ContentMD5: base64digestMD5,
- AddContentMD5Metadata: req.AddContentMd5Metadata,
+ AddContentMD5Metadata: req.GetAddContentMd5Metadata(),
})
if err != nil {
@@ -147,7 +147,7 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp
return &service.CreateUploadLocationResponse{
SignedUrl: resp.URL.String(),
NativeUrl: storagePath.String(),
- ExpiresAt: timestamppb.New(time.Now().Add(req.ExpiresIn.AsDuration())),
+ ExpiresAt: timestamppb.New(time.Now().Add(req.GetExpiresIn().AsDuration())),
Headers: resp.RequiredRequestHeaders,
}, nil
}
@@ -172,7 +172,7 @@ func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDown
switch req.GetArtifactType() {
case service.ArtifactType_ARTIFACT_TYPE_DECK:
- nativeURL = node.Closure.DeckUri
+ nativeURL = node.GetClosure().GetDeckUri()
}
} else {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "unsupported source [%v]", reflect.TypeOf(req.GetSource()))
@@ -194,7 +194,7 @@ func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDown
signedURLResp, err := s.dataStore.CreateSignedURL(ctx, ref, storage.SignedURLProperties{
Scope: stow.ClientMethodGet,
- ExpiresIn: req.ExpiresIn.AsDuration(),
+ ExpiresIn: req.GetExpiresIn().AsDuration(),
})
if err != nil {
@@ -202,7 +202,7 @@ func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDown
}
u := []string{signedURLResp.URL.String()}
- ts := timestamppb.New(time.Now().Add(req.ExpiresIn.AsDuration()))
+ ts := timestamppb.New(time.Now().Add(req.GetExpiresIn().AsDuration()))
//
return &service.CreateDownloadLinkResponse{
@@ -223,9 +223,9 @@ func (s Service) CreateDownloadLocation(ctx context.Context, req *service.Create
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "error while validating request: %v", err)
}
- resp, err := s.dataStore.CreateSignedURL(ctx, storage.DataReference(req.NativeUrl), storage.SignedURLProperties{
+ resp, err := s.dataStore.CreateSignedURL(ctx, storage.DataReference(req.GetNativeUrl()), storage.SignedURLProperties{
Scope: stow.ClientMethodGet,
- ExpiresIn: req.ExpiresIn.AsDuration(),
+ ExpiresIn: req.GetExpiresIn().AsDuration(),
})
if err != nil {
@@ -234,21 +234,21 @@ func (s Service) CreateDownloadLocation(ctx context.Context, req *service.Create
return &service.CreateDownloadLocationResponse{
SignedUrl: resp.URL.String(),
- ExpiresAt: timestamppb.New(time.Now().Add(req.ExpiresIn.AsDuration())),
+ ExpiresAt: timestamppb.New(time.Now().Add(req.GetExpiresIn().AsDuration())),
}, nil
}
func (s Service) validateCreateDownloadLocationRequest(req *service.CreateDownloadLocationRequest) error {
- validatedExpiresIn, err := validateDuration(req.ExpiresIn, s.cfg.Download.MaxExpiresIn.Duration)
+ validatedExpiresIn, err := validateDuration(req.GetExpiresIn(), s.cfg.Download.MaxExpiresIn.Duration)
if err != nil {
return fmt.Errorf("expiresIn is invalid. Error: %w", err)
}
req.ExpiresIn = validatedExpiresIn
- if _, err := url.Parse(req.NativeUrl); err != nil {
+ if _, err := url.Parse(req.GetNativeUrl()); err != nil {
return fmt.Errorf("failed to parse native_url [%v]",
- req.NativeUrl)
+ req.GetNativeUrl())
}
return nil
@@ -275,7 +275,7 @@ func validateDuration(input *durationpb.Duration, maxAllowed time.Duration) (*du
}
func (s Service) validateCreateDownloadLinkRequest(req *service.CreateDownloadLinkRequest) (*service.CreateDownloadLinkRequest, error) {
- validatedExpiresIn, err := validateDuration(req.ExpiresIn, s.cfg.Download.MaxExpiresIn.Duration)
+ validatedExpiresIn, err := validateDuration(req.GetExpiresIn(), s.cfg.Download.MaxExpiresIn.Duration)
if err != nil {
return nil, fmt.Errorf("expiresIn is invalid. Error: %w", err)
}
@@ -328,16 +328,16 @@ func (s Service) GetCompleteTaskExecutionID(ctx context.Context, taskExecID *cor
taskExecs, err := s.taskExecutionManager.ListTaskExecutions(ctx, &admin.TaskExecutionListRequest{
NodeExecutionId: taskExecID.GetNodeExecutionId(),
Limit: 1,
- Filters: fmt.Sprintf("eq(retry_attempt,%s)", strconv.Itoa(int(taskExecID.RetryAttempt))),
+ Filters: fmt.Sprintf("eq(retry_attempt,%s)", strconv.Itoa(int(taskExecID.GetRetryAttempt()))),
})
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "failed to list task executions [%v]. Error: %v", taskExecID, err)
}
- if len(taskExecs.TaskExecutions) == 0 {
+ if len(taskExecs.GetTaskExecutions()) == 0 {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "no task executions were listed [%v]. Error: %v", taskExecID, err)
}
- taskExec := taskExecs.TaskExecutions[0]
- return taskExec.Id, nil
+ taskExec := taskExecs.GetTaskExecutions()[0]
+ return taskExec.GetId(), nil
}
func (s Service) GetTaskExecutionID(ctx context.Context, attempt int, nodeExecID *core.NodeExecutionIdentifier) (*core.TaskExecutionIdentifier, error) {
@@ -349,11 +349,11 @@ func (s Service) GetTaskExecutionID(ctx context.Context, attempt int, nodeExecID
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "failed to list task executions [%v]. Error: %v", nodeExecID, err)
}
- if len(taskExecs.TaskExecutions) == 0 {
+ if len(taskExecs.GetTaskExecutions()) == 0 {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "no task executions were listed [%v]. Error: %v", nodeExecID, err)
}
- taskExec := taskExecs.TaskExecutions[0]
- return taskExec.Id, nil
+ taskExec := taskExecs.GetTaskExecutions()[0]
+ return taskExec.GetId(), nil
}
func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID *core.NodeExecutionIdentifier, ioType common.ArtifactType, name string) (
@@ -368,9 +368,9 @@ func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID *core.
var lm *core.LiteralMap
if ioType == common.ArtifactTypeI {
- lm = resp.FullInputs
+ lm = resp.GetFullInputs()
} else if ioType == common.ArtifactTypeO {
- lm = resp.FullOutputs
+ lm = resp.GetFullOutputs()
} else {
// Assume deck, and create a download link request
dlRequest := service.CreateDownloadLinkRequest{
@@ -383,13 +383,13 @@ func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID *core.
}
return &service.GetDataResponse{
Data: &service.GetDataResponse_PreSignedUrls{
- PreSignedUrls: resp.PreSignedUrls,
+ PreSignedUrls: resp.GetPreSignedUrls(),
},
}, nil
}
if name != "" {
- if literal, ok := lm.Literals[name]; ok {
+ if literal, ok := lm.GetLiterals()[name]; ok {
return &service.GetDataResponse{
Data: &service.GetDataResponse_Literal{
Literal: literal,
@@ -418,15 +418,15 @@ func (s Service) GetDataFromTaskExecution(ctx context.Context, taskExecID *core.
}
if ioType == common.ArtifactTypeI {
- lm = resp.FullInputs
+ lm = resp.GetFullInputs()
} else if ioType == common.ArtifactTypeO {
- lm = resp.FullOutputs
+ lm = resp.GetFullOutputs()
} else {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "deck type cannot be specified with a retry attempt, just use the node instead")
}
if name != "" {
- if literal, ok := lm.Literals[name]; ok {
+ if literal, ok := lm.GetLiterals()[name]; ok {
return &service.GetDataResponse{
Data: &service.GetDataResponse_Literal{
Literal: literal,
diff --git a/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go b/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go
index 7aaab0bb60..e21d9b0b5a 100644
--- a/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go
+++ b/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go
@@ -57,20 +57,20 @@ func (p *Publisher) Publish(ctx context.Context, notificationType string, msg pr
switch msgType := msg.(type) {
case *admin.WorkflowExecutionEventRequest:
- e := msgType.Event
- executionID = e.ExecutionId.String()
- phase = e.Phase.String()
- eventTime = e.OccurredAt.AsTime()
+ e := msgType.GetEvent()
+ executionID = e.GetExecutionId().String()
+ phase = e.GetPhase().String()
+ eventTime = e.GetOccurredAt().AsTime()
case *admin.TaskExecutionEventRequest:
- e := msgType.Event
- executionID = e.TaskId.String()
- phase = e.Phase.String()
- eventTime = e.OccurredAt.AsTime()
+ e := msgType.GetEvent()
+ executionID = e.GetTaskId().String()
+ phase = e.GetPhase().String()
+ eventTime = e.GetOccurredAt().AsTime()
case *admin.NodeExecutionEventRequest:
- e := msgType.Event
- executionID = msgType.Event.Id.String()
- phase = e.Phase.String()
- eventTime = e.OccurredAt.AsTime()
+ e := msgType.GetEvent()
+ executionID = msgType.GetEvent().GetId().String()
+ phase = e.GetPhase().String()
+ eventTime = e.GetOccurredAt().AsTime()
default:
return fmt.Errorf("unsupported event types [%+v]", reflect.TypeOf(msg))
}
@@ -128,13 +128,13 @@ func (c *CloudEventWrappedPublisher) TransformWorkflowExecutionEvent(ctx context
if rawEvent == nil {
return nil, fmt.Errorf("nothing to publish, WorkflowExecution event is nil")
}
- if rawEvent.ExecutionId == nil {
+ if rawEvent.GetExecutionId() == nil {
logger.Warningf(ctx, "nil execution id in event [%+v]", rawEvent)
return nil, fmt.Errorf("nil execution id in event [%+v]", rawEvent)
}
// For now, don't append any additional information unless succeeded
- if rawEvent.Phase != core.WorkflowExecution_SUCCEEDED {
+ if rawEvent.GetPhase() != core.WorkflowExecution_SUCCEEDED {
return &event.CloudEventWorkflowExecution{
RawEvent: rawEvent,
}, nil
@@ -142,35 +142,35 @@ func (c *CloudEventWrappedPublisher) TransformWorkflowExecutionEvent(ctx context
// TODO: Make this one call to the DB instead of two.
executionModel, err := c.db.ExecutionRepo().Get(ctx, repositoryInterfaces.Identifier{
- Project: rawEvent.ExecutionId.Project,
- Domain: rawEvent.ExecutionId.Domain,
- Name: rawEvent.ExecutionId.Name,
+ Project: rawEvent.GetExecutionId().GetProject(),
+ Domain: rawEvent.GetExecutionId().GetDomain(),
+ Name: rawEvent.GetExecutionId().GetName(),
})
if err != nil {
- logger.Warningf(ctx, "couldn't find execution [%+v] for cloud event processing", rawEvent.ExecutionId)
+ logger.Warningf(ctx, "couldn't find execution [%+v] for cloud event processing", rawEvent.GetExecutionId())
return nil, err
}
ex, err := transformers.FromExecutionModel(ctx, executionModel, transformers.DefaultExecutionTransformerOptions)
if err != nil {
- logger.Warningf(ctx, "couldn't transform execution [%+v] for cloud event processing", rawEvent.ExecutionId)
+ logger.Warningf(ctx, "couldn't transform execution [%+v] for cloud event processing", rawEvent.GetExecutionId())
return nil, err
}
- if ex.Closure.WorkflowId == nil {
+ if ex.GetClosure().GetWorkflowId() == nil {
logger.Warningf(ctx, "workflow id is nil for execution [%+v]", ex)
return nil, fmt.Errorf("workflow id is nil for execution [%+v]", ex)
}
workflowModel, err := c.db.WorkflowRepo().Get(ctx, repositoryInterfaces.Identifier{
- Project: ex.Closure.WorkflowId.Project,
- Domain: ex.Closure.WorkflowId.Domain,
- Name: ex.Closure.WorkflowId.Name,
- Version: ex.Closure.WorkflowId.Version,
+ Project: ex.GetClosure().GetWorkflowId().GetProject(),
+ Domain: ex.GetClosure().GetWorkflowId().GetDomain(),
+ Name: ex.GetClosure().GetWorkflowId().GetName(),
+ Version: ex.GetClosure().GetWorkflowId().GetVersion(),
})
if err != nil {
- logger.Warningf(ctx, "couldn't find workflow [%+v] for cloud event processing", ex.Closure.WorkflowId)
+ logger.Warningf(ctx, "couldn't find workflow [%+v] for cloud event processing", ex.GetClosure().GetWorkflowId())
return nil, err
}
var workflowInterface core.TypedInterface
- if workflowModel.TypedInterface != nil && len(workflowModel.TypedInterface) > 0 {
+ if len(workflowModel.TypedInterface) > 0 {
err = proto.Unmarshal(workflowModel.TypedInterface, &workflowInterface)
if err != nil {
return nil, fmt.Errorf(
@@ -191,15 +191,15 @@ func (c *CloudEventWrappedPublisher) TransformWorkflowExecutionEvent(ctx context
OutputInterface: &workflowInterface,
ArtifactIds: spec.GetMetadata().GetArtifactIds(),
ReferenceExecution: spec.GetMetadata().GetReferenceExecution(),
- Principal: spec.GetMetadata().Principal,
- LaunchPlanId: spec.LaunchPlan,
+ Principal: spec.GetMetadata().GetPrincipal(),
+ LaunchPlanId: spec.GetLaunchPlan(),
}, nil
}
func getNodeExecutionContext(ctx context.Context, identifier *core.NodeExecutionIdentifier) context.Context {
- ctx = contextutils.WithProjectDomain(ctx, identifier.ExecutionId.Project, identifier.ExecutionId.Domain)
- ctx = contextutils.WithExecutionID(ctx, identifier.ExecutionId.Name)
- return contextutils.WithNodeID(ctx, identifier.NodeId)
+ ctx = contextutils.WithProjectDomain(ctx, identifier.GetExecutionId().GetProject(), identifier.GetExecutionId().GetDomain())
+ ctx = contextutils.WithExecutionID(ctx, identifier.GetExecutionId().GetName())
+ return contextutils.WithNodeID(ctx, identifier.GetNodeId())
}
// This is a rough copy of the ListTaskExecutions function in TaskExecutionManager. It can be deprecated once we move the processing out of Admin itself.
@@ -230,7 +230,7 @@ func (c *CloudEventWrappedPublisher) getLatestTaskExecutions(ctx context.Context
if err != nil {
return nil, err
}
- if output.TaskExecutions == nil || len(output.TaskExecutions) == 0 {
+ if len(output.TaskExecutions) == 0 {
logger.Debugf(ctx, "no task executions found for node exec id [%+v]", nodeExecutionID)
return nil, nil
}
@@ -245,16 +245,16 @@ func (c *CloudEventWrappedPublisher) getLatestTaskExecutions(ctx context.Context
}
func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Context, rawEvent *event.NodeExecutionEvent) (*event.CloudEventNodeExecution, error) {
- if rawEvent == nil || rawEvent.Id == nil {
+ if rawEvent == nil || rawEvent.GetId() == nil {
return nil, fmt.Errorf("nothing to publish, NodeExecution event or ID is nil")
}
// Skip nodes unless they're succeeded and not start nodes
- if rawEvent.Phase != core.NodeExecution_SUCCEEDED {
+ if rawEvent.GetPhase() != core.NodeExecution_SUCCEEDED {
return &event.CloudEventNodeExecution{
RawEvent: rawEvent,
}, nil
- } else if rawEvent.Id.NodeId == "start-node" {
+ } else if rawEvent.GetId().GetNodeId() == "start-node" {
return &event.CloudEventNodeExecution{
RawEvent: rawEvent,
}, nil
@@ -263,12 +263,12 @@ func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con
// This gets the parent workflow execution metadata
executionModel, err := c.db.ExecutionRepo().Get(ctx, repositoryInterfaces.Identifier{
- Project: rawEvent.Id.ExecutionId.Project,
- Domain: rawEvent.Id.ExecutionId.Domain,
- Name: rawEvent.Id.ExecutionId.Name,
+ Project: rawEvent.GetId().GetExecutionId().GetProject(),
+ Domain: rawEvent.GetId().GetExecutionId().GetDomain(),
+ Name: rawEvent.GetId().GetExecutionId().GetName(),
})
if err != nil {
- logger.Infof(ctx, "couldn't find execution [%+v] for cloud event processing", rawEvent.Id.ExecutionId)
+ logger.Infof(ctx, "couldn't find execution [%+v] for cloud event processing", rawEvent.GetId().GetExecutionId())
return nil, err
}
@@ -283,22 +283,22 @@ func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con
var taskExecID *core.TaskExecutionIdentifier
var typedInterface *core.TypedInterface
- lte, err := c.getLatestTaskExecutions(ctx, rawEvent.Id)
+ lte, err := c.getLatestTaskExecutions(ctx, rawEvent.GetId())
if err != nil {
- logger.Errorf(ctx, "failed to get latest task execution for node exec id [%+v] with err: %v", rawEvent.Id, err)
+ logger.Errorf(ctx, "failed to get latest task execution for node exec id [%+v] with err: %v", rawEvent.GetId(), err)
return nil, err
}
if lte != nil {
taskModel, err := c.db.TaskRepo().Get(ctx, repositoryInterfaces.Identifier{
- Project: lte.Id.TaskId.Project,
- Domain: lte.Id.TaskId.Domain,
- Name: lte.Id.TaskId.Name,
- Version: lte.Id.TaskId.Version,
+ Project: lte.GetId().GetTaskId().GetProject(),
+ Domain: lte.GetId().GetTaskId().GetDomain(),
+ Name: lte.GetId().GetTaskId().GetName(),
+ Version: lte.GetId().GetTaskId().GetVersion(),
})
if err != nil {
// TODO: metric this
// metric
- logger.Debugf(ctx, "Failed to get task with task id [%+v] with err %v", lte.Id.TaskId, err)
+ logger.Debugf(ctx, "Failed to get task with task id [%+v] with err %v", lte.GetId().GetTaskId(), err)
return nil, err
}
task, err := transformers.FromTaskModel(taskModel)
@@ -306,8 +306,8 @@ func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con
logger.Debugf(ctx, "Failed to transform task model with err %v", err)
return nil, err
}
- typedInterface = task.Closure.CompiledTask.Template.Interface
- taskExecID = lte.Id
+ typedInterface = task.GetClosure().GetCompiledTask().GetTemplate().GetInterface()
+ taskExecID = lte.GetId()
}
return &event.CloudEventNodeExecution{
@@ -315,8 +315,8 @@ func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con
TaskExecId: taskExecID,
OutputInterface: typedInterface,
ArtifactIds: spec.GetMetadata().GetArtifactIds(),
- Principal: spec.GetMetadata().Principal,
- LaunchPlanId: spec.LaunchPlan,
+ Principal: spec.GetMetadata().GetPrincipal(),
+ LaunchPlanId: spec.GetLaunchPlan(),
}, nil
}
@@ -348,14 +348,14 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy
switch msgType := msg.(type) {
case *admin.WorkflowExecutionEventRequest:
topic = "cloudevents.WorkflowExecution"
- e := msgType.Event
- executionID = e.ExecutionId.String()
- phase = e.Phase.String()
- eventTime = e.OccurredAt.AsTime()
+ e := msgType.GetEvent()
+ executionID = e.GetExecutionId().String()
+ phase = e.GetPhase().String()
+ eventTime = e.GetOccurredAt().AsTime()
dummyNodeExecutionID := &core.NodeExecutionIdentifier{
NodeId: "end-node",
- ExecutionId: e.ExecutionId,
+ ExecutionId: e.GetExecutionId(),
}
// This forms part of the key in the Artifact store,
// but it should probably be entirely derived by that service instead.
@@ -369,17 +369,17 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy
case *admin.TaskExecutionEventRequest:
topic = "cloudevents.TaskExecution"
- e := msgType.Event
- executionID = e.TaskId.String()
- phase = e.Phase.String()
- eventTime = e.OccurredAt.AsTime()
+ e := msgType.GetEvent()
+ executionID = e.GetTaskId().String()
+ phase = e.GetPhase().String()
+ eventTime = e.GetOccurredAt().AsTime()
eventID = fmt.Sprintf("%v.%v", executionID, phase)
- if e.ParentNodeExecutionId == nil {
+ if e.GetParentNodeExecutionId() == nil {
return fmt.Errorf("parent node execution id is nil for task execution [%+v]", e)
}
- eventSource = common.FlyteURLKeyFromNodeExecutionIDRetry(e.ParentNodeExecutionId,
- int(e.RetryAttempt))
+ eventSource = common.FlyteURLKeyFromNodeExecutionIDRetry(e.GetParentNodeExecutionId(),
+ int(e.GetRetryAttempt()))
finalMsg, err = c.TransformTaskExecutionEvent(ctx, e)
if err != nil {
logger.Errorf(ctx, "Failed to transform task execution event with error: %v", err)
@@ -387,12 +387,12 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy
}
case *admin.NodeExecutionEventRequest:
topic = "cloudevents.NodeExecution"
- e := msgType.Event
- executionID = msgType.Event.Id.String()
- phase = e.Phase.String()
- eventTime = e.OccurredAt.AsTime()
+ e := msgType.GetEvent()
+ executionID = msgType.GetEvent().GetId().String()
+ phase = e.GetPhase().String()
+ eventTime = e.GetOccurredAt().AsTime()
eventID = fmt.Sprintf("%v.%v", executionID, phase)
- eventSource = common.FlyteURLKeyFromNodeExecutionID(msgType.Event.Id)
+ eventSource = common.FlyteURLKeyFromNodeExecutionID(msgType.GetEvent().GetId())
finalMsg, err = c.TransformNodeExecutionEvent(ctx, e)
if err != nil {
logger.Errorf(ctx, "Failed to transform node execution event with error: %v", err)
@@ -400,7 +400,7 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy
}
case *event.CloudEventExecutionStart:
topic = "cloudevents.ExecutionStart"
- executionID = msgType.ExecutionId.String()
+ executionID = msgType.GetExecutionId().String()
eventID = fmt.Sprintf("%v", executionID)
eventTime = time.Now()
// CloudEventExecutionStart doesn't have a nested event
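Almost every hunk in this file swaps direct proto field access (`e.ExecutionId.Project`) for the generated getters (`e.GetExecutionId().GetProject()`). A minimal sketch of why that matters, using hypothetical stand-in types rather than Flyte's real messages: protoc-gen-go emits getters that check for a nil receiver, so a chained call degrades to zero values instead of panicking.

```go
package main

import "fmt"

// Hypothetical stand-ins for protoc-gen-go output; real generated code
// follows the same pattern: getters tolerate nil receivers.
type ExecutionID struct{ Project string }

func (x *ExecutionID) GetProject() string {
	if x == nil {
		return ""
	}
	return x.Project
}

type Event struct{ ExecutionId *ExecutionID }

func (x *Event) GetExecutionId() *ExecutionID {
	if x == nil {
		return nil
	}
	return x.ExecutionId
}

func main() {
	var e *Event // nil event, as a publisher might receive

	// e.ExecutionId.Project would panic with a nil pointer dereference;
	// the chained getters degrade to zero values instead.
	fmt.Printf("project=%q\n", e.GetExecutionId().GetProject()) // project=""
}
```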
diff --git a/flyteadmin/pkg/async/notifications/email.go b/flyteadmin/pkg/async/notifications/email.go
index a89210cead..e23806cc12 100644
--- a/flyteadmin/pkg/async/notifications/email.go
+++ b/flyteadmin/pkg/async/notifications/email.go
@@ -30,58 +30,58 @@ const launchPlanVersion = "launch_plan.version"
const replaceAllInstances = -1
func getProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Id.Project
+ return exec.GetId().GetProject()
}
func getDomain(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Id.Domain
+ return exec.GetId().GetDomain()
}
func getName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Id.Name
+ return exec.GetId().GetName()
}
func getPhase(request *admin.WorkflowExecutionEventRequest, _ *admin.Execution) string {
- return strings.ToLower(request.Event.Phase.String())
+ return strings.ToLower(request.GetEvent().GetPhase().String())
}
func getError(request *admin.WorkflowExecutionEventRequest, _ *admin.Execution) string {
- if request.Event.GetError() != nil {
- return fmt.Sprintf(executionError, request.Event.GetError().Message)
+ if request.GetEvent().GetError() != nil {
+ return fmt.Sprintf(executionError, request.GetEvent().GetError().GetMessage())
}
return ""
}
func getWorkflowProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Closure.WorkflowId.Project
+ return exec.GetClosure().GetWorkflowId().GetProject()
}
func getWorkflowDomain(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Closure.WorkflowId.Domain
+ return exec.GetClosure().GetWorkflowId().GetDomain()
}
func getWorkflowName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Closure.WorkflowId.Name
+ return exec.GetClosure().GetWorkflowId().GetName()
}
func getWorkflowVersion(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Closure.WorkflowId.Version
+ return exec.GetClosure().GetWorkflowId().GetVersion()
}
func getLaunchPlanProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Spec.LaunchPlan.Project
+ return exec.GetSpec().GetLaunchPlan().GetProject()
}
func getLaunchPlanDomain(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Spec.LaunchPlan.Domain
+ return exec.GetSpec().GetLaunchPlan().GetDomain()
}
func getLaunchPlanName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Spec.LaunchPlan.Name
+ return exec.GetSpec().GetLaunchPlan().GetName()
}
func getLaunchPlanVersion(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string {
- return exec.Spec.LaunchPlan.Version
+ return exec.GetSpec().GetLaunchPlan().GetVersion()
}
var getTemplateValueFuncs = map[string]GetTemplateValue{
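The getter functions above feed a template-substitution map. A rough sketch of how such a map might drive notification templates, assuming `{{ project }}`-style tokens and a simplified signature; the real `GetTemplateValue` functions take the event request and execution protos.

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified stand-ins; token syntax ("{{ project }}") is an assumption.
type execution struct{ project, domain, name, phase string }

type getTemplateValue func(e execution) string

var getters = map[string]getTemplateValue{
	"project": func(e execution) string { return e.project },
	"domain":  func(e execution) string { return e.domain },
	"name":    func(e execution) string { return e.name },
	"phase":   func(e execution) string { return e.phase },
}

const replaceAllInstances = -1

// substitute replaces every occurrence of each known token with its value.
func substitute(tmpl string, e execution) string {
	for key, get := range getters {
		token := fmt.Sprintf("{{ %s }}", key)
		tmpl = strings.Replace(tmpl, token, get(e), replaceAllInstances)
	}
	return tmpl
}

func main() {
	e := execution{project: "proj", domain: "prod", name: "e124", phase: "succeeded"}
	fmt.Println(substitute(`Execution "{{ name }}" has {{ phase }} in "{{ domain }}".`, e))
}
```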
diff --git a/flyteadmin/pkg/async/notifications/email_test.go b/flyteadmin/pkg/async/notifications/email_test.go
index 35f351a45d..0d045603e8 100644
--- a/flyteadmin/pkg/async/notifications/email_test.go
+++ b/flyteadmin/pkg/async/notifications/email_test.go
@@ -155,8 +155,8 @@ func TestToEmailMessageFromWorkflowExecutionEvent(t *testing.T) {
SubjectLine: `Notice: Execution "e124" has succeeded in "prod".`,
Body: `Execution "e124" has succeeded in "prod". View details at https://example.com/executions/proj/prod/e124.`,
}
- assert.True(t, emailMessage.Body == expected.Body)
- assert.True(t, emailMessage.SubjectLine == expected.SubjectLine)
- assert.True(t, emailMessage.SenderEmail == expected.SenderEmail)
- assert.True(t, len(emailMessage.RecipientsEmail) == len(expected.RecipientsEmail))
+ assert.True(t, emailMessage.GetBody() == expected.GetBody())
+ assert.True(t, emailMessage.GetSubjectLine() == expected.GetSubjectLine())
+ assert.True(t, emailMessage.GetSenderEmail() == expected.GetSenderEmail())
+ assert.True(t, len(emailMessage.GetRecipientsEmail()) == len(expected.GetRecipientsEmail()))
}
diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go b/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go
index 712bd7080d..d9891d616b 100644
--- a/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go
+++ b/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go
@@ -23,7 +23,7 @@ type AwsEmailer struct {
func FlyteEmailToSesEmailInput(email *admin.EmailMessage) ses.SendEmailInput {
var toAddress []*string
- for _, toEmail := range email.RecipientsEmail {
+ for _, toEmail := range email.GetRecipientsEmail() {
// SES email input takes an array of pointers to strings so we have to create a new one for each email
//nolint:unconvert
e := string(toEmail)
@@ -61,7 +61,7 @@ func (e *AwsEmailer) SendEmail(ctx context.Context, email *admin.EmailMessage) e
e.systemMetrics.SendError.Inc()
return errors.NewFlyteAdminErrorf(codes.Internal, "errors were seen while sending emails")
}
- logger.Debugf(ctx, "Sent email to %s sub: %s", email.RecipientsEmail, email.SubjectLine)
+ logger.Debugf(ctx, "Sent email to %s sub: %s", email.GetRecipientsEmail(), email.GetSubjectLine())
e.systemMetrics.SendSuccess.Inc()
return nil
}
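The comment in `FlyteEmailToSesEmailInput` notes that SES wants a `[]*string` of recipients. A hedged sketch of building the same input with the aws-sdk-go v1 helpers; names like `buildSendEmailInput` are illustrative, not part of the package.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// buildSendEmailInput shows the []*string shaping the comment above refers to.
// aws.String allocates a fresh *string per recipient, so later loop iterations
// cannot alias the same address.
func buildSendEmailInput(sender, subject, body string, recipients []string) ses.SendEmailInput {
	toAddresses := make([]*string, 0, len(recipients))
	for _, r := range recipients {
		toAddresses = append(toAddresses, aws.String(r))
	}
	return ses.SendEmailInput{
		Source:      aws.String(sender),
		Destination: &ses.Destination{ToAddresses: toAddresses},
		Message: &ses.Message{
			Subject: &ses.Content{Data: aws.String(subject)},
			Body:    &ses.Body{Html: &ses.Content{Data: aws.String(body)}},
		},
	}
}

func main() {
	in := buildSendEmailInput("noreply@example.com", "hi", "<p>hello</p>",
		[]string{"a@example.com", "b@example.com"})
	fmt.Println(len(in.Destination.ToAddresses)) // 2
}
```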
diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go b/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go
index 01a2a06273..1caf1ce3a1 100644
--- a/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go
+++ b/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go
@@ -46,18 +46,18 @@ func TestAwsEmailer_SendEmail(t *testing.T) {
sendEmailValidationFunc := func(input *ses.SendEmailInput) (*ses.SendEmailOutput, error) {
assert.Equal(t, *input.Source, expectedSenderEmail)
- assert.Equal(t, *input.Message.Body.Html.Data, emailNotification.Body)
- assert.Equal(t, *input.Message.Subject.Data, emailNotification.SubjectLine)
+ assert.Equal(t, *input.Message.Body.Html.Data, emailNotification.GetBody())
+ assert.Equal(t, *input.Message.Subject.Data, emailNotification.GetSubjectLine())
for _, toEmail := range input.Destination.ToAddresses {
var foundEmail = false
- for _, verifyToEmail := range emailNotification.RecipientsEmail {
+ for _, verifyToEmail := range emailNotification.GetRecipientsEmail() {
if *toEmail == verifyToEmail {
foundEmail = true
}
}
assert.Truef(t, foundEmail, "To Email address [%s] wasn't a part of original inputs.", *toEmail)
}
- assert.Equal(t, len(input.Destination.ToAddresses), len(emailNotification.RecipientsEmail))
+ assert.Equal(t, len(input.Destination.ToAddresses), len(emailNotification.GetRecipientsEmail()))
return &ses.SendEmailOutput{}, nil
}
mockAwsEmail.SetSendEmailFunc(sendEmailValidationFunc)
@@ -80,8 +80,8 @@ func TestFlyteEmailToSesEmailInput(t *testing.T) {
}
sesEmailInput := FlyteEmailToSesEmailInput(emailNotification)
- assert.Equal(t, *sesEmailInput.Destination.ToAddresses[0], emailNotification.RecipientsEmail[0])
- assert.Equal(t, *sesEmailInput.Destination.ToAddresses[1], emailNotification.RecipientsEmail[1])
+ assert.Equal(t, *sesEmailInput.Destination.ToAddresses[0], emailNotification.GetRecipientsEmail()[0])
+ assert.Equal(t, *sesEmailInput.Destination.ToAddresses[1], emailNotification.GetRecipientsEmail()[1])
assert.Equal(t, *sesEmailInput.Message.Subject.Data, "Notice: Execution \"name\" has succeeded in \"domain\".")
}
diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go
index e566fdd740..611cebceb2 100644
--- a/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go
+++ b/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go
@@ -31,10 +31,10 @@ func TestProcessor_StartProcessing(t *testing.T) {
testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testSubscriberMessage)
sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error {
- assert.Equal(t, email.Body, testEmail.Body)
- assert.Equal(t, email.RecipientsEmail, testEmail.RecipientsEmail)
- assert.Equal(t, email.SubjectLine, testEmail.SubjectLine)
- assert.Equal(t, email.SenderEmail, testEmail.SenderEmail)
+ assert.Equal(t, email.GetBody(), testEmail.GetBody())
+ assert.Equal(t, email.GetRecipientsEmail(), testEmail.GetRecipientsEmail())
+ assert.Equal(t, email.GetSubjectLine(), testEmail.GetSubjectLine())
+ assert.Equal(t, email.GetSenderEmail(), testEmail.GetSenderEmail())
return nil
}
mockEmailer.SetSendEmailFunc(sendEmailValidationFunc)
diff --git a/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go
index d48efeeee9..6d0f7d87fe 100644
--- a/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go
+++ b/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go
@@ -35,10 +35,10 @@ func TestGcpProcessor_StartProcessing(t *testing.T) {
testGcpProcessor := NewGcpProcessor(&testGcpSubscriber, &mockGcpEmailer, promutils.NewTestScope())
sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error {
- assert.Equal(t, email.Body, testEmail.Body)
- assert.Equal(t, email.RecipientsEmail, testEmail.RecipientsEmail)
- assert.Equal(t, email.SubjectLine, testEmail.SubjectLine)
- assert.Equal(t, email.SenderEmail, testEmail.SenderEmail)
+ assert.Equal(t, email.GetBody(), testEmail.GetBody())
+ assert.Equal(t, email.GetRecipientsEmail(), testEmail.GetRecipientsEmail())
+ assert.Equal(t, email.GetSubjectLine(), testEmail.GetSubjectLine())
+ assert.Equal(t, email.GetSenderEmail(), testEmail.GetSenderEmail())
return nil
}
mockGcpEmailer.SetSendEmailFunc(sendEmailValidationFunc)
diff --git a/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go b/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go
index 03dfa063ea..adae0d92fc 100644
--- a/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go
+++ b/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go
@@ -16,7 +16,7 @@ type NoopEmail struct{}
func (n *NoopEmail) SendEmail(ctx context.Context, email *admin.EmailMessage) error {
logger.Debugf(ctx, "received noop SendEmail request with subject [%s] and recipient [%s]",
- email.SubjectLine, strings.Join(email.RecipientsEmail, ","))
+ email.GetSubjectLine(), strings.Join(email.GetRecipientsEmail(), ","))
return nil
}
diff --git a/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go
index 83594284a9..17251ca723 100644
--- a/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go
+++ b/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go
@@ -20,10 +20,10 @@ func TestSandboxProcessor_StartProcessingSuccess(t *testing.T) {
testSandboxProcessor := NewSandboxProcessor(msgChan, &mockSandboxEmailer)
sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error {
- assert.Equal(t, testEmail.Body, email.Body)
- assert.Equal(t, testEmail.RecipientsEmail, email.RecipientsEmail)
- assert.Equal(t, testEmail.SubjectLine, email.SubjectLine)
- assert.Equal(t, testEmail.SenderEmail, email.SenderEmail)
+ assert.Equal(t, testEmail.GetBody(), email.GetBody())
+ assert.Equal(t, testEmail.GetRecipientsEmail(), email.GetRecipientsEmail())
+ assert.Equal(t, testEmail.GetSubjectLine(), email.GetSubjectLine())
+ assert.Equal(t, testEmail.GetSenderEmail(), email.GetSenderEmail())
return nil
}
diff --git a/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go b/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go
index c8386bd41e..a325cbee75 100644
--- a/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go
+++ b/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go
@@ -34,15 +34,15 @@ func getSendgridEmail(adminEmail *admin.EmailMessage) *mail.SGMailV3 {
m := mail.NewV3Mail()
// This from email address is really here as a formality. For sendgrid specifically, the sender email is determined
// from the api key that's used, not what you send along here.
- from := mail.NewEmail("Flyte Notifications", adminEmail.SenderEmail)
- content := mail.NewContent("text/html", adminEmail.Body)
+ from := mail.NewEmail("Flyte Notifications", adminEmail.GetSenderEmail())
+ content := mail.NewContent("text/html", adminEmail.GetBody())
m.SetFrom(from)
m.AddContent(content)
personalization := mail.NewPersonalization()
- emailAddresses := getEmailAddresses(adminEmail.RecipientsEmail)
+ emailAddresses := getEmailAddresses(adminEmail.GetRecipientsEmail())
personalization.AddTos(emailAddresses...)
- personalization.Subject = adminEmail.SubjectLine
+ personalization.Subject = adminEmail.GetSubjectLine()
m.AddPersonalizations(personalization)
return m
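As the comment above says, the From address is largely a formality because SendGrid ties the effective sender to the API key. A sketch of assembling and sending such a message with sendgrid-go; the `SENDGRID_API_KEY` environment variable is an assumption for this sketch.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/sendgrid/sendgrid-go"
	"github.com/sendgrid/sendgrid-go/helpers/mail"
)

func main() {
	// Build the message the same way getSendgridEmail does.
	m := mail.NewV3Mail()
	m.SetFrom(mail.NewEmail("Flyte Notifications", "noreply@example.com")) // formality; sender is tied to the API key
	m.AddContent(mail.NewContent("text/html", "<p>Execution succeeded.</p>"))

	p := mail.NewPersonalization()
	p.AddTos(mail.NewEmail("", "user@example.com"))
	p.Subject = "Notice: execution succeeded"
	m.AddPersonalizations(p)

	// SENDGRID_API_KEY is an assumed env var name for this sketch.
	client := sendgrid.NewSendClient(os.Getenv("SENDGRID_API_KEY"))
	resp, err := client.Send(m)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.StatusCode)
}
```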
diff --git a/flyteadmin/pkg/async/notifications/implementations/smtp_emailer.go b/flyteadmin/pkg/async/notifications/implementations/smtp_emailer.go
index 5a705bc0c1..29a79b1589 100644
--- a/flyteadmin/pkg/async/notifications/implementations/smtp_emailer.go
+++ b/flyteadmin/pkg/async/notifications/implementations/smtp_emailer.go
@@ -72,11 +72,11 @@ func (s *SMTPEmailer) SendEmail(ctx context.Context, email *admin.EmailMessage)
s.smtpClient = smtpClient
}
- if err := s.smtpClient.Mail(email.SenderEmail); err != nil {
+ if err := s.smtpClient.Mail(email.GetSenderEmail()); err != nil {
return s.emailError(ctx, fmt.Sprintf("Error creating email instance: %s", err))
}
- for _, recipient := range email.RecipientsEmail {
+ for _, recipient := range email.GetRecipientsEmail() {
if err := s.smtpClient.Rcpt(recipient); err != nil {
return s.emailError(ctx, fmt.Sprintf("Error adding email recipient: %s", err))
}
@@ -113,8 +113,8 @@ func (s *SMTPEmailer) emailError(ctx context.Context, error string) error {
func createMailBody(emailSender string, email *admin.EmailMessage) string {
headerMap := make(map[string]string)
headerMap["From"] = emailSender
- headerMap["To"] = strings.Join(email.RecipientsEmail, ",")
- headerMap["Subject"] = email.SubjectLine
+ headerMap["To"] = strings.Join(email.GetRecipientsEmail(), ",")
+ headerMap["Subject"] = email.GetSubjectLine()
headerMap["Content-Type"] = "text/html; charset=\"UTF-8\""
mailMessage := ""
@@ -123,7 +123,7 @@ func createMailBody(emailSender string, email *admin.EmailMessage) string {
mailMessage += fmt.Sprintf("%s: %s\r\n", k, v)
}
- mailMessage += "\r\n" + email.Body
+ mailMessage += "\r\n" + email.GetBody()
return mailMessage
}
@@ -140,7 +140,7 @@ func NewSMTPEmailer(ctx context.Context, config runtimeInterfaces.NotificationsC
auth := smtp.PlainAuth("", emailConf.SMTPUsername, smtpPassword, emailConf.SMTPServer)
- // #nosec G402
+ // #nosec G402: Allow skipping TLS verification in specific environments.
tlsConfiguration = &tls.Config{
InsecureSkipVerify: emailConf.SMTPSkipTLSVerify,
ServerName: emailConf.SMTPServer,
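`createMailBody` writes raw email headers followed by a blank line and the HTML body. A small deterministic re-creation of that assembly (the production code ranges over a map, so its header order may vary):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	headers := []struct{ k, v string }{
		{"From", "noreply@example.com"},
		{"To", strings.Join([]string{"a@example.com", "b@example.com"}, ",")},
		{"Subject", "Notice: execution succeeded"},
		{"Content-Type", `text/html; charset="UTF-8"`},
	}

	var b strings.Builder
	for _, h := range headers {
		fmt.Fprintf(&b, "%s: %s\r\n", h.k, h.v) // "Key: value" lines, CRLF-terminated
	}
	b.WriteString("\r\n<p>Execution succeeded.</p>") // blank line, then the HTML body

	fmt.Print(b.String())
}
```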
diff --git a/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go b/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go
index 9c3cb166b5..d4249e9122 100644
--- a/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go
+++ b/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go
@@ -78,7 +78,7 @@ func getScheduleName(scheduleNamePrefix string, identifier *core.Identifier) str
func getScheduleDescription(identifier *core.Identifier) string {
return fmt.Sprintf(scheduleDescriptionFormat,
- identifier.Project, identifier.Domain, identifier.Name)
+ identifier.GetProject(), identifier.GetDomain(), identifier.GetName())
}
func getScheduleExpression(schedule *admin.Schedule) (string, error) {
@@ -88,11 +88,11 @@ func getScheduleExpression(schedule *admin.Schedule) (string, error) {
if schedule.GetRate() != nil {
// AWS uses pluralization for units of values not equal to 1.
// See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
- unit := strings.ToLower(schedule.GetRate().Unit.String())
- if schedule.GetRate().Value != 1 {
+ unit := strings.ToLower(schedule.GetRate().GetUnit().String())
+ if schedule.GetRate().GetValue() != 1 {
unit = fmt.Sprintf("%ss", unit)
}
- return fmt.Sprintf(rateExpression, schedule.GetRate().Value, unit), nil
+ return fmt.Sprintf(rateExpression, schedule.GetRate().GetValue(), unit), nil
}
logger.Debugf(context.Background(), "scheduler encountered invalid schedule expression: %s", schedule.String())
return "", errors.NewFlyteAdminErrorf(codes.InvalidArgument, "unrecognized schedule expression")
@@ -176,9 +176,9 @@ func (s *cloudWatchScheduler) CreateScheduleInput(ctx context.Context, appConfig
payload, err := SerializeScheduleWorkflowPayload(
schedule.GetKickoffTimeInputArg(),
&admin.NamedEntityIdentifier{
- Project: identifier.Project,
- Domain: identifier.Domain,
- Name: identifier.Name,
+ Project: identifier.GetProject(),
+ Domain: identifier.GetDomain(),
+ Name: identifier.GetName(),
})
if err != nil {
logger.Errorf(ctx, "failed to serialize schedule workflow payload for launch plan: %v with err: %v",
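`getScheduleExpression` above emits either an AWS cron or rate expression, pluralizing the unit for values other than 1. A sketch of the same branching with plain types; the exact `rateExpression`/cron format strings in the real code aren't shown here, so the `rate(...)`/`cron(...)` shapes below follow the AWS docs linked in the comment.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

type schedule struct {
	cron      string // e.g. "0 12 * * ? *"
	rateValue uint32
	rateUnit  string // "MINUTE", "HOUR", "DAY"
}

// toAWSExpression mirrors the branching above: cron wins if set, otherwise a
// rate expression with a pluralized unit for values other than 1.
func toAWSExpression(s schedule) (string, error) {
	if s.cron != "" {
		return fmt.Sprintf("cron(%s)", s.cron), nil
	}
	if s.rateValue > 0 {
		unit := strings.ToLower(s.rateUnit)
		if s.rateValue != 1 {
			unit += "s"
		}
		return fmt.Sprintf("rate(%d %s)", s.rateValue, unit), nil
	}
	return "", errors.New("unrecognized schedule expression")
}

func main() {
	for _, s := range []schedule{
		{rateValue: 1, rateUnit: "MINUTE"},
		{rateValue: 5, rateUnit: "MINUTE"},
		{cron: "0 12 * * ? *"},
	} {
		expr, err := toAWSExpression(s)
		fmt.Println(expr, err)
	}
	// Output:
	// rate(1 minute) <nil>
	// rate(5 minutes) <nil>
	// cron(0 12 * * ? *) <nil>
}
```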
diff --git a/flyteadmin/pkg/async/schedule/aws/shared.go b/flyteadmin/pkg/async/schedule/aws/shared.go
index 3868e05799..e21b25ed5b 100644
--- a/flyteadmin/pkg/async/schedule/aws/shared.go
+++ b/flyteadmin/pkg/async/schedule/aws/shared.go
@@ -12,7 +12,7 @@ import (
func hashIdentifier(identifier *core.Identifier) uint64 {
h := fnv.New64()
_, err := h.Write([]byte(fmt.Sprintf(scheduleNameInputsFormat,
- identifier.Project, identifier.Domain, identifier.Name)))
+ identifier.GetProject(), identifier.GetDomain(), identifier.GetName())))
if err != nil {
// This shouldn't occur.
logger.Errorf(context.Background(),
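`hashIdentifier` folds project/domain/name into a 64-bit FNV digest so the derived schedule name stays stable for a given launch plan. A compact sketch; the `%s:%s:%s` layout stands in for `scheduleNameInputsFormat`, which isn't shown in this hunk.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// hashID folds a project/domain/name triple into a stable 64-bit digest, as
// hashIdentifier does above. Any fixed layout works, since the hash only
// needs to be deterministic.
func hashID(project, domain, name string) uint64 {
	h := fnv.New64()
	// Write on an fnv hash never returns an error, so it is safe to ignore here.
	_, _ = h.Write([]byte(fmt.Sprintf("%s:%s:%s", project, domain, name)))
	return h.Sum64()
}

func main() {
	fmt.Println(hashID("project", "domain", "daily_report")) // same inputs always hash to the same value
}
```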
diff --git a/flyteadmin/pkg/async/schedule/aws/workflow_executor.go b/flyteadmin/pkg/async/schedule/aws/workflow_executor.go
index c4a5d75d14..d9a21c9026 100644
--- a/flyteadmin/pkg/async/schedule/aws/workflow_executor.go
+++ b/flyteadmin/pkg/async/schedule/aws/workflow_executor.go
@@ -63,18 +63,18 @@ var doNotconsumeBase64 = false
func (e *workflowExecutor) resolveKickoffTimeArg(
request ScheduledWorkflowExecutionRequest, launchPlan *admin.LaunchPlan,
executionRequest *admin.ExecutionCreateRequest) error {
- if request.KickoffTimeArg == "" || launchPlan.Closure.ExpectedInputs == nil {
+ if request.KickoffTimeArg == "" || launchPlan.GetClosure().GetExpectedInputs() == nil {
logger.Debugf(context.Background(), "No kickoff time to resolve for scheduled workflow execution: [%s/%s/%s]",
- executionRequest.Project, executionRequest.Domain, executionRequest.Name)
+ executionRequest.GetProject(), executionRequest.GetDomain(), executionRequest.GetName())
return nil
}
- for name := range launchPlan.Closure.ExpectedInputs.Parameters {
+ for name := range launchPlan.GetClosure().GetExpectedInputs().GetParameters() {
if name == request.KickoffTimeArg {
ts, err := ptypes.TimestampProto(request.KickoffTime)
if err != nil {
logger.Warningf(context.Background(),
"failed to serialize kickoff time %+v to timestamp proto for scheduled workflow execution with "+
- "launchPlan [%+v]", request.KickoffTime, launchPlan.Id)
+ "launchPlan [%+v]", request.KickoffTime, launchPlan.GetId())
return errors.NewFlyteAdminErrorf(
codes.Internal, "could not serialize kickoff time %+v to timestamp proto", request.KickoffTime)
}
@@ -96,7 +96,7 @@ func (e *workflowExecutor) resolveKickoffTimeArg(
}
logger.Warningf(context.Background(),
"expected kickoff time arg with launch plan [%+v] but did not find any matching expected input to resolve",
- launchPlan.Id)
+ launchPlan.GetId())
return nil
}
@@ -112,23 +112,24 @@ func (e *workflowExecutor) getActiveLaunchPlanVersion(launchPlanIdentifier *admi
e.metrics.NoActiveLaunchPlanVersionsFound.Inc()
return &admin.LaunchPlan{}, err
}
- if len(launchPlans.LaunchPlans) != 1 {
+ if len(launchPlans.GetLaunchPlans()) != 1 {
e.metrics.GreaterThan1LaunchPlanVersionsFound.Inc()
logger.Warningf(context.Background(), "failed to get exactly one active launch plan for identifier: %+v",
launchPlanIdentifier)
return &admin.LaunchPlan{}, errors.NewFlyteAdminErrorf(codes.Internal,
"failed to get exactly one active launch plan for identifier: %+v", launchPlanIdentifier)
}
- return launchPlans.LaunchPlans[0], nil
+ return launchPlans.GetLaunchPlans()[0], nil
}
func generateExecutionName(launchPlan *admin.LaunchPlan, kickoffTime time.Time) string {
hashedIdentifier := hashIdentifier(&core.Identifier{
- Project: launchPlan.Id.Project,
- Domain: launchPlan.Id.Domain,
- Name: launchPlan.Id.Name,
+ Project: launchPlan.GetId().GetProject(),
+ Domain: launchPlan.GetId().GetDomain(),
+ Name: launchPlan.GetId().GetName(),
})
- randomSeed := kickoffTime.UnixNano() + int64(hashedIdentifier)
+ randomSeed := kickoffTime.UnixNano() + int64(hashedIdentifier) // #nosec G115
+
return common.GetExecutionName(randomSeed)
}
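`generateExecutionName` seeds the name generator with the kickoff time plus the hashed launch-plan identifier, so a re-fired schedule event lands on the same execution name; the `#nosec G115` acknowledges the deliberate uint64-to-int64 wrap. The sketch below only illustrates that idea — Flyte's `common.GetExecutionName` may use a different alphabet and length.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nameFromSeed is only an illustration of "same seed, same name"; it is not
// the real common.GetExecutionName implementation.
func nameFromSeed(seed int64) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
	r := rand.New(rand.NewSource(seed))
	b := make([]byte, 20)
	for i := range b {
		b[i] = alphabet[r.Intn(len(alphabet))]
	}
	return string(b)
}

func main() {
	kickoff := time.Date(2018, 11, 30, 20, 36, 28, 0, time.UTC)
	var hashedIdentifier uint64 = 0xdeadbeefcafef00d

	// The uint64 -> int64 conversion can wrap; that is acceptable because the
	// seed only needs to be deterministic, hence the #nosec G115 annotation.
	seed := kickoff.UnixNano() + int64(hashedIdentifier) // #nosec G115

	fmt.Println(nameFromSeed(seed) == nameFromSeed(seed)) // true: deterministic per schedule firing
}
```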
@@ -137,7 +138,7 @@ func (e *workflowExecutor) formulateExecutionCreateRequest(
// Deterministically assign a name based on the schedule kickoff time/launch plan definition.
name := generateExecutionName(launchPlan, kickoffTime)
logger.Debugf(context.Background(), "generated name [%s] for scheduled execution with launch plan [%+v]",
- name, launchPlan.Id)
+ name, launchPlan.GetId())
kickoffTimeProto, err := ptypes.TimestampProto(kickoffTime)
if err != nil {
// We expected that kickoff times are valid (in order for a scheduled event to fire).
@@ -148,11 +149,11 @@ func (e *workflowExecutor) formulateExecutionCreateRequest(
kickoffTime, err)
}
executionRequest := &admin.ExecutionCreateRequest{
- Project: launchPlan.Id.Project,
- Domain: launchPlan.Id.Domain,
+ Project: launchPlan.GetId().GetProject(),
+ Domain: launchPlan.GetId().GetDomain(),
Name: name,
Spec: &admin.ExecutionSpec{
- LaunchPlan: launchPlan.Id,
+ LaunchPlan: launchPlan.GetId(),
Metadata: &admin.ExecutionMetadata{
Mode: admin.ExecutionMetadata_SCHEDULED,
ScheduledAt: kickoffTimeProto,
@@ -208,8 +209,8 @@ func (e *workflowExecutor) run() error {
}
executionRequest := e.formulateExecutionCreateRequest(launchPlan, scheduledWorkflowExecutionRequest.KickoffTime)
- ctx = contextutils.WithWorkflowID(ctx, fmt.Sprintf(workflowIdentifierFmt, executionRequest.Project,
- executionRequest.Domain, executionRequest.Name))
+ ctx = contextutils.WithWorkflowID(ctx, fmt.Sprintf(workflowIdentifierFmt, executionRequest.GetProject(),
+ executionRequest.GetDomain(), executionRequest.GetName()))
err = e.resolveKickoffTimeArg(scheduledWorkflowExecutionRequest, launchPlan, executionRequest)
if err != nil {
e.metrics.FailedResolveKickoffTimeArg.Inc()
@@ -228,12 +229,12 @@ func (e *workflowExecutor) run() error {
if ok && ec.Code() != codes.AlreadyExists {
e.metrics.FailedKickoffExecution.Inc()
logger.Errorf(context.Background(), "failed to execute scheduled workflow [%s:%s:%s] with err: %v",
- executionRequest.Project, executionRequest.Domain, executionRequest.Name, err)
+ executionRequest.GetProject(), executionRequest.GetDomain(), executionRequest.GetName(), err)
continue
}
} else {
logger.Debugf(context.Background(), "created scheduled workflow execution %+v with kickoff time %+v",
- response.Id, scheduledWorkflowExecutionRequest.KickoffTime)
+ response.GetId(), scheduledWorkflowExecutionRequest.KickoffTime)
}
executionLaunchTime := time.Now()
diff --git a/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go b/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go
index f6fc9b9693..38f8afddbd 100644
--- a/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go
+++ b/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go
@@ -82,9 +82,9 @@ func TestResolveKickoffTimeArg(t *testing.T) {
},
}
executionRequest := &admin.ExecutionCreateRequest{
- Project: testIdentifier.Project,
- Domain: testIdentifier.Domain,
- Name: testIdentifier.Name,
+ Project: testIdentifier.GetProject(),
+ Domain: testIdentifier.GetDomain(),
+ Name: testIdentifier.GetName(),
Inputs: &core.LiteralMap{
Literals: map[string]*core.Literal{},
},
@@ -92,9 +92,9 @@ func TestResolveKickoffTimeArg(t *testing.T) {
testExecutor := newWorkflowExecutorForTest(nil, nil, nil)
err := testExecutor.resolveKickoffTimeArg(scheduleRequest, launchPlan, executionRequest)
assert.Nil(t, err)
- assert.Contains(t, executionRequest.Inputs.Literals, testKickoffTime)
+ assert.Contains(t, executionRequest.GetInputs().GetLiterals(), testKickoffTime)
assert.Equal(t, testKickoffTimeProtoLiteral,
- executionRequest.Inputs.Literals[testKickoffTime])
+ executionRequest.GetInputs().GetLiterals()[testKickoffTime])
}
func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) {
@@ -112,9 +112,9 @@ func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) {
},
}
executionRequest := &admin.ExecutionCreateRequest{
- Project: testIdentifier.Project,
- Domain: testIdentifier.Domain,
- Name: testIdentifier.Name,
+ Project: testIdentifier.GetProject(),
+ Domain: testIdentifier.GetDomain(),
+ Name: testIdentifier.GetName(),
Inputs: &core.LiteralMap{
Literals: map[string]*core.Literal{},
},
@@ -122,7 +122,7 @@ func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) {
testExecutor := newWorkflowExecutorForTest(nil, nil, nil)
err := testExecutor.resolveKickoffTimeArg(scheduleRequest, launchPlan, executionRequest)
assert.Nil(t, err)
- assert.NotContains(t, executionRequest.Inputs.Literals, testKickoffTime)
+ assert.NotContains(t, executionRequest.GetInputs().GetLiterals(), testKickoffTime)
}
func TestGetActiveLaunchPlanVersion(t *testing.T) {
@@ -132,9 +132,9 @@ func TestGetActiveLaunchPlanVersion(t *testing.T) {
Name: "name",
}
launchPlanIdentifier := core.Identifier{
- Project: launchPlanNamedIdentifier.Project,
- Domain: launchPlanNamedIdentifier.Domain,
- Name: launchPlanNamedIdentifier.Name,
+ Project: launchPlanNamedIdentifier.GetProject(),
+ Domain: launchPlanNamedIdentifier.GetDomain(),
+ Name: launchPlanNamedIdentifier.GetName(),
Version: "foo",
}
@@ -142,9 +142,9 @@ func TestGetActiveLaunchPlanVersion(t *testing.T) {
launchPlanManager.(*mocks.MockLaunchPlanManager).SetListLaunchPlansCallback(
func(ctx context.Context, request *admin.ResourceListRequest) (
*admin.LaunchPlanList, error) {
- assert.True(t, proto.Equal(launchPlanNamedIdentifier, request.Id))
- assert.Equal(t, "eq(state,1)", request.Filters)
- assert.Equal(t, uint32(1), request.Limit)
+ assert.True(t, proto.Equal(launchPlanNamedIdentifier, request.GetId()))
+ assert.Equal(t, "eq(state,1)", request.GetFilters())
+ assert.Equal(t, uint32(1), request.GetLimit())
return &admin.LaunchPlanList{
LaunchPlans: []*admin.LaunchPlan{
{
@@ -156,7 +156,7 @@ func TestGetActiveLaunchPlanVersion(t *testing.T) {
testExecutor := newWorkflowExecutorForTest(nil, nil, launchPlanManager)
launchPlan, err := testExecutor.getActiveLaunchPlanVersion(launchPlanNamedIdentifier)
assert.Nil(t, err)
- assert.True(t, proto.Equal(&launchPlanIdentifier, launchPlan.Id))
+ assert.True(t, proto.Equal(&launchPlanIdentifier, launchPlan.GetId()))
}
func TestGetActiveLaunchPlanVersion_ManagerError(t *testing.T) {
@@ -198,13 +198,13 @@ func TestFormulateExecutionCreateRequest(t *testing.T) {
}
testExecutor := newWorkflowExecutorForTest(nil, nil, nil)
executionRequest := testExecutor.formulateExecutionCreateRequest(launchPlan, time.Unix(1543607788, 0))
- assert.Equal(t, "foo", executionRequest.Project)
- assert.Equal(t, "bar", executionRequest.Domain)
- assert.Equal(t, "a2k4s9v5j246kwmdmh4t", executionRequest.Name)
+ assert.Equal(t, "foo", executionRequest.GetProject())
+ assert.Equal(t, "bar", executionRequest.GetDomain())
+ assert.Equal(t, "a2k4s9v5j246kwmdmh4t", executionRequest.GetName())
- assert.True(t, proto.Equal(&launchPlanIdentifier, executionRequest.Spec.LaunchPlan))
- assert.Equal(t, admin.ExecutionMetadata_SCHEDULED, executionRequest.Spec.Metadata.Mode)
- assert.Equal(t, int64(1543607788), executionRequest.Spec.Metadata.ScheduledAt.Seconds)
+ assert.True(t, proto.Equal(&launchPlanIdentifier, executionRequest.GetSpec().GetLaunchPlan()))
+ assert.Equal(t, admin.ExecutionMetadata_SCHEDULED, executionRequest.GetSpec().GetMetadata().GetMode())
+ assert.Equal(t, int64(1543607788), executionRequest.GetSpec().GetMetadata().GetScheduledAt().GetSeconds())
}
func TestRun(t *testing.T) {
@@ -234,12 +234,12 @@ func TestRun(t *testing.T) {
testExecutionManager.SetCreateCallback(func(
ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) (
*admin.ExecutionCreateResponse, error) {
- assert.Equal(t, "project", request.Project)
- assert.Equal(t, "domain", request.Domain)
- assert.Equal(t, "ar8fphnlc5wh9dksjncj", request.Name)
+ assert.Equal(t, "project", request.GetProject())
+ assert.Equal(t, "domain", request.GetDomain())
+ assert.Equal(t, "ar8fphnlc5wh9dksjncj", request.GetName())
if messagesSeen == 0 {
- assert.Contains(t, request.Inputs.Literals, testKickoffTime)
- assert.Equal(t, testKickoffTimeProtoLiteral, request.Inputs.Literals[testKickoffTime])
+ assert.Contains(t, request.GetInputs().GetLiterals(), testKickoffTime)
+ assert.Equal(t, testKickoffTimeProtoLiteral, request.GetInputs().GetLiterals()[testKickoffTime])
}
messagesSeen++
return &admin.ExecutionCreateResponse{}, nil
@@ -248,10 +248,10 @@ func TestRun(t *testing.T) {
launchPlanManager.(*mocks.MockLaunchPlanManager).SetListLaunchPlansCallback(
func(ctx context.Context, request *admin.ResourceListRequest) (
*admin.LaunchPlanList, error) {
- assert.Equal(t, "project", request.Id.Project)
- assert.Equal(t, "domain", request.Id.Domain)
- assert.Equal(t, "eq(state,1)", request.Filters)
- assert.Equal(t, uint32(1), request.Limit)
+ assert.Equal(t, "project", request.GetId().GetProject())
+ assert.Equal(t, "domain", request.GetId().GetDomain())
+ assert.Equal(t, "eq(state,1)", request.GetFilters())
+ assert.Equal(t, uint32(1), request.GetLimit())
return &admin.LaunchPlanList{
LaunchPlans: []*admin.LaunchPlan{
{
diff --git a/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go b/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go
index fb9aebe34e..244cd0b40e 100644
--- a/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go
+++ b/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go
@@ -22,9 +22,9 @@ func (s *MockEventScheduler) CreateScheduleInput(ctx context.Context, appConfig
payload, _ := aws.SerializeScheduleWorkflowPayload(
schedule.GetKickoffTimeInputArg(),
&admin.NamedEntityIdentifier{
- Project: identifier.Project,
- Domain: identifier.Domain,
- Name: identifier.Name,
+ Project: identifier.GetProject(),
+ Domain: identifier.GetDomain(),
+ Name: identifier.GetName(),
})
return interfaces.AddScheduleInput{Identifier: identifier, ScheduleExpression: schedule, Payload: payload}, nil
}
diff --git a/flyteadmin/pkg/clusterresource/controller.go b/flyteadmin/pkg/clusterresource/controller.go
index 6ea1731909..37cfa46054 100644
--- a/flyteadmin/pkg/clusterresource/controller.go
+++ b/flyteadmin/pkg/clusterresource/controller.go
@@ -209,8 +209,8 @@ func (c *controller) getCustomTemplateValues(
collectedErrs = append(collectedErrs, err)
}
}
- if attributes != nil && attributes.Attributes != nil {
- for templateKey, templateValue := range attributes.Attributes {
+ if attributes != nil && attributes.GetAttributes() != nil {
+ for templateKey, templateValue := range attributes.GetAttributes() {
customTemplateValues[fmt.Sprintf(templateVariableFormat, templateKey)] = templateValue
}
}
@@ -481,8 +481,8 @@ func (c *controller) createResourceFromTemplate(ctx context.Context, templateDir
// First, add the special case namespace template which is always substituted by the system
// rather than fetched via a user-specified source.
templateValues[fmt.Sprintf(templateVariableFormat, namespaceVariable)] = namespace
- templateValues[fmt.Sprintf(templateVariableFormat, projectVariable)] = project.Id
- templateValues[fmt.Sprintf(templateVariableFormat, domainVariable)] = domain.Id
+ templateValues[fmt.Sprintf(templateVariableFormat, projectVariable)] = project.GetId()
+ templateValues[fmt.Sprintf(templateVariableFormat, domainVariable)] = domain.GetId()
var k8sManifest = string(template)
for templateKey, templateValue := range customTemplateValues {
@@ -587,11 +587,11 @@ func (c *controller) Sync(ctx context.Context) error {
stats := ResourceSyncStats{}
- for _, project := range projects.Projects {
- for _, domain := range project.Domains {
- namespace := common.GetNamespaceName(c.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), project.Id, domain.Name)
+ for _, project := range projects.GetProjects() {
+ for _, domain := range project.GetDomains() {
+ namespace := common.GetNamespaceName(c.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), project.GetId(), domain.GetName())
customTemplateValues, err := c.getCustomTemplateValues(
- ctx, project.Id, domain.Id, domainTemplateValues[domain.Id])
+ ctx, project.GetId(), domain.GetId(), domainTemplateValues[domain.GetId()])
if err != nil {
logger.Errorf(ctx, "Failed to get custom template values for %s with err: %v", namespace, err)
errs = append(errs, err)
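`createResourceFromTemplate` (and the Sync loop above) substitute the system-provided namespace/project/domain values first, then layer the per-project/domain attributes on top before applying the manifest. A hedged sketch with `{{ namespace }}`-style tokens standing in for `templateVariableFormat`:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	manifest := `apiVersion: v1
kind: ResourceQuota
metadata:
  name: project-quota
  namespace: {{ namespace }}
spec:
  hard:
    limits.cpu: {{ projectQuotaCpu }}`

	// System-provided values first, then user-configured attributes on top,
	// mirroring the ordering in createResourceFromTemplate.
	systemValues := map[string]string{
		"{{ namespace }}": "flytesnacks-development",
		"{{ project }}":   "flytesnacks",
		"{{ domain }}":    "development",
	}
	customValues := map[string]string{
		"{{ projectQuotaCpu }}": "8",
	}

	for k, v := range systemValues {
		manifest = strings.ReplaceAll(manifest, k, v)
	}
	for k, v := range customValues {
		manifest = strings.ReplaceAll(manifest, k, v)
	}
	fmt.Println(manifest)
}
```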
diff --git a/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider.go b/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider.go
index 550637183d..16113edd23 100644
--- a/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider.go
+++ b/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider.go
@@ -25,9 +25,9 @@ func (p serviceAdminProvider) GetClusterResourceAttributes(ctx context.Context,
if err != nil {
return nil, err
}
- if resource != nil && resource.Attributes != nil && resource.Attributes.MatchingAttributes != nil &&
- resource.Attributes.MatchingAttributes.GetClusterResourceAttributes() != nil {
- return resource.Attributes.MatchingAttributes.GetClusterResourceAttributes(), nil
+ if resource != nil && resource.GetAttributes() != nil && resource.GetAttributes().GetMatchingAttributes() != nil &&
+ resource.GetAttributes().GetMatchingAttributes().GetClusterResourceAttributes() != nil {
+ return resource.GetAttributes().GetMatchingAttributes().GetClusterResourceAttributes(), nil
}
return nil, NewMissingEntityError("cluster resource attributes")
}
@@ -56,11 +56,11 @@ func (p serviceAdminProvider) GetProjects(ctx context.Context) (*admin.Projects,
if err != nil {
return nil, err
}
- projects = append(projects, projectResp.Projects...)
- if len(projectResp.Token) == 0 {
+ projects = append(projects, projectResp.GetProjects()...)
+ if len(projectResp.GetToken()) == 0 {
break
}
- listReq.Token = projectResp.Token
+ listReq.Token = projectResp.GetToken()
}
return &admin.Projects{
Projects: projects,
diff --git a/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider_test.go b/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider_test.go
index 182c9e2573..0bd1390f50 100644
--- a/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider_test.go
+++ b/flyteadmin/pkg/clusterresource/impl/admin_service_data_provider_test.go
@@ -24,7 +24,7 @@ func TestServiceGetClusterResourceAttributes(t *testing.T) {
}
mockAdmin := mocks.AdminServiceClient{}
mockAdmin.OnGetProjectDomainAttributesMatch(ctx, mock.MatchedBy(func(req *admin.ProjectDomainAttributesGetRequest) bool {
- return req.Project == project && req.Domain == domain && req.ResourceType == admin.MatchableResource_CLUSTER_RESOURCE
+ return req.GetProject() == project && req.GetDomain() == domain && req.GetResourceType() == admin.MatchableResource_CLUSTER_RESOURCE
})).Return(&admin.ProjectDomainAttributesGetResponse{
Attributes: &admin.ProjectDomainAttributes{
MatchingAttributes: &admin.MatchingAttributes{
@@ -42,12 +42,12 @@ func TestServiceGetClusterResourceAttributes(t *testing.T) {
}
attrs, err := provider.GetClusterResourceAttributes(context.TODO(), project, domain)
assert.NoError(t, err)
- assert.EqualValues(t, attrs.Attributes, attributes)
+ assert.EqualValues(t, attrs.GetAttributes(), attributes)
})
t.Run("admin service error", func(t *testing.T) {
mockAdmin := mocks.AdminServiceClient{}
mockAdmin.OnGetProjectDomainAttributesMatch(ctx, mock.MatchedBy(func(req *admin.ProjectDomainAttributesGetRequest) bool {
- return req.Project == project && req.Domain == domain && req.ResourceType == admin.MatchableResource_CLUSTER_RESOURCE
+ return req.GetProject() == project && req.GetDomain() == domain && req.GetResourceType() == admin.MatchableResource_CLUSTER_RESOURCE
})).Return(&admin.ProjectDomainAttributesGetResponse{}, errFoo)
provider := serviceAdminProvider{
@@ -59,7 +59,7 @@ func TestServiceGetClusterResourceAttributes(t *testing.T) {
t.Run("wonky admin service response", func(t *testing.T) {
mockAdmin := mocks.AdminServiceClient{}
mockAdmin.OnGetProjectDomainAttributesMatch(ctx, mock.MatchedBy(func(req *admin.ProjectDomainAttributesGetRequest) bool {
- return req.Project == project && req.Domain == domain && req.ResourceType == admin.MatchableResource_CLUSTER_RESOURCE
+ return req.GetProject() == project && req.GetDomain() == domain && req.GetResourceType() == admin.MatchableResource_CLUSTER_RESOURCE
})).Return(&admin.ProjectDomainAttributesGetResponse{
Attributes: &admin.ProjectDomainAttributes{
MatchingAttributes: &admin.MatchingAttributes{
@@ -88,7 +88,7 @@ func TestServiceGetProjects(t *testing.T) {
t.Run("happy case", func(t *testing.T) {
mockAdmin := mocks.AdminServiceClient{}
mockAdmin.OnListProjectsMatch(ctx, mock.MatchedBy(func(req *admin.ProjectListRequest) bool {
- return req.Limit == 100 && req.Filters == "ne(state,1)" && req.SortBy.Key == "created_at"
+ return req.GetLimit() == 100 && req.GetFilters() == "ne(state,1)" && req.GetSortBy().GetKey() == "created_at"
})).Return(&admin.Projects{
Projects: []*admin.Project{
{
@@ -104,12 +104,12 @@ func TestServiceGetProjects(t *testing.T) {
}
projects, err := provider.GetProjects(ctx)
assert.NoError(t, err)
- assert.Len(t, projects.Projects, 2)
+ assert.Len(t, projects.GetProjects(), 2)
})
t.Run("admin error", func(t *testing.T) {
mockAdmin := mocks.AdminServiceClient{}
mockAdmin.OnListProjectsMatch(ctx, mock.MatchedBy(func(req *admin.ProjectListRequest) bool {
- return req.Limit == 100 && req.Filters == "ne(state,1)" && req.SortBy.Key == "created_at"
+ return req.GetLimit() == 100 && req.GetFilters() == "ne(state,1)" && req.GetSortBy().GetKey() == "created_at"
})).Return(nil, errFoo)
provider := serviceAdminProvider{
adminClient: &mockAdmin,
diff --git a/flyteadmin/pkg/clusterresource/impl/db_admin_data_provider_test.go b/flyteadmin/pkg/clusterresource/impl/db_admin_data_provider_test.go
index 81ba4805ba..7fa0039799 100644
--- a/flyteadmin/pkg/clusterresource/impl/db_admin_data_provider_test.go
+++ b/flyteadmin/pkg/clusterresource/impl/db_admin_data_provider_test.go
@@ -49,7 +49,7 @@ func TestGetClusterResourceAttributes(t *testing.T) {
}
attrs, err := provider.GetClusterResourceAttributes(context.TODO(), project, domain)
assert.NoError(t, err)
- assert.EqualValues(t, attrs.Attributes, attributes)
+ assert.EqualValues(t, attrs.GetAttributes(), attributes)
})
t.Run("error", func(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context, request interfaces.ResourceRequest) (*interfaces.ResourceResponse, error) {
@@ -125,7 +125,7 @@ func TestGetProjects(t *testing.T) {
}
projects, err := provider.GetProjects(context.TODO())
assert.NoError(t, err)
- assert.Len(t, projects.Projects, 2)
+ assert.Len(t, projects.GetProjects(), 2)
})
t.Run("db error", func(t *testing.T) {
mockRepo := repoMocks.NewMockRepository()
diff --git a/flyteadmin/pkg/common/flyte_url.go b/flyteadmin/pkg/common/flyte_url.go
index f5245ac238..e4c005d902 100644
--- a/flyteadmin/pkg/common/flyte_url.go
+++ b/flyteadmin/pkg/common/flyte_url.go
@@ -108,7 +108,7 @@ func ParseFlyteURLToExecution(flyteURL string) (ParsedExecution, error) {
taskExecID := core.TaskExecutionIdentifier{
NodeExecutionId: &nodeExecID,
// checking for overflow here is probably unreasonable
- RetryAttempt: uint32(a),
+ RetryAttempt: uint32(a), // #nosec G115
}
return ParsedExecution{
PartialTaskExecID: &taskExecID,
@@ -126,8 +126,8 @@ func ParseFlyteURLToExecution(flyteURL string) (ParsedExecution, error) {
}
func FlyteURLsFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifier, deck bool) *admin.FlyteURLs {
- base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s", nodeExecutionID.ExecutionId.Project,
- nodeExecutionID.ExecutionId.Domain, nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId)
+ base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s", nodeExecutionID.GetExecutionId().GetProject(),
+ nodeExecutionID.GetExecutionId().GetDomain(), nodeExecutionID.GetExecutionId().GetName(), nodeExecutionID.GetNodeId())
res := &admin.FlyteURLs{
Inputs: fmt.Sprintf("%s/%s", base, ArtifactTypeI),
@@ -143,7 +143,7 @@ func FlyteURLsFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifier,
// This constructs a fully unique prefix, and when post-pended with the output name, forms a fully unique name for
// the artifact service (including the project/domain of course, which the artifact service will add).
func FlyteURLKeyFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifier) string {
- res := fmt.Sprintf("%s/%s", nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId)
+ res := fmt.Sprintf("%s/%s", nodeExecutionID.GetExecutionId().GetName(), nodeExecutionID.GetNodeId())
return res
}
@@ -151,14 +151,14 @@ func FlyteURLKeyFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifie
// FlyteURLKeyFromNodeExecutionIDRetry is a modified version of the function above.
// See the uniqueness comment above.
func FlyteURLKeyFromNodeExecutionIDRetry(nodeExecutionID *core.NodeExecutionIdentifier, retry int) string {
- res := fmt.Sprintf("%s/%s/%s", nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId, strconv.Itoa(retry))
+ res := fmt.Sprintf("%s/%s/%s", nodeExecutionID.GetExecutionId().GetName(), nodeExecutionID.GetNodeId(), strconv.Itoa(retry))
return res
}
func FlyteURLsFromTaskExecutionID(taskExecutionID *core.TaskExecutionIdentifier, deck bool) *admin.FlyteURLs {
- base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s/%s", taskExecutionID.NodeExecutionId.ExecutionId.Project,
- taskExecutionID.NodeExecutionId.ExecutionId.Domain, taskExecutionID.NodeExecutionId.ExecutionId.Name, taskExecutionID.NodeExecutionId.NodeId, strconv.Itoa(int(taskExecutionID.RetryAttempt)))
+ base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s/%s", taskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject(),
+ taskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain(), taskExecutionID.GetNodeExecutionId().GetExecutionId().GetName(), taskExecutionID.GetNodeExecutionId().GetNodeId(), strconv.Itoa(int(taskExecutionID.GetRetryAttempt())))
res := &admin.FlyteURLs{
Inputs: fmt.Sprintf("%s/%s", base, ArtifactTypeI),
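The helpers in flyte_url.go build flyte:// URLs and artifact-store keys straight from the (now nil-safe) identifier getters. A sketch of the shapes they produce; the `/i`, `/o`, `/d` suffixes stand in for `ArtifactTypeI/O/D`, whose constants aren't shown in this hunk.

```go
package main

import "fmt"

func main() {
	project, domain, name, nodeID := "fs", "dev", "abc", "n0"
	retry := 3

	// Node-level URLs, as in FlyteURLsFromNodeExecutionID.
	base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s", project, domain, name, nodeID)
	fmt.Println(base + "/i") // inputs
	fmt.Println(base + "/o") // outputs
	fmt.Println(base + "/d") // deck

	// Artifact-store key prefixes, as in FlyteURLKeyFromNodeExecutionID{,Retry}:
	// unique within a project/domain because they embed execution name and node id.
	fmt.Println(fmt.Sprintf("%s/%s", name, nodeID))
	fmt.Println(fmt.Sprintf("%s/%s/%d", name, nodeID, retry))
}
```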
diff --git a/flyteadmin/pkg/common/flyte_url_test.go b/flyteadmin/pkg/common/flyte_url_test.go
index a0cbfcda2b..bd954c5bb6 100644
--- a/flyteadmin/pkg/common/flyte_url_test.go
+++ b/flyteadmin/pkg/common/flyte_url_test.go
@@ -197,11 +197,11 @@ func TestParseFlyteURLToExecution(t *testing.T) {
x, err := ParseFlyteURLToExecution("flyte://v1/fs/dev/abc/n0/3/o/o0")
assert.NoError(t, err)
assert.Nil(t, x.NodeExecID)
- assert.Nil(t, x.PartialTaskExecID.TaskId)
- assert.Equal(t, "fs", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Project)
- assert.Equal(t, "dev", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Domain)
- assert.Equal(t, "abc", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Name)
- assert.Equal(t, "n0", x.PartialTaskExecID.NodeExecutionId.NodeId)
+ assert.Nil(t, x.PartialTaskExecID.GetTaskId())
+ assert.Equal(t, "fs", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetProject())
+ assert.Equal(t, "dev", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetDomain())
+ assert.Equal(t, "abc", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetName())
+ assert.Equal(t, "n0", x.PartialTaskExecID.GetNodeExecutionId().GetNodeId())
assert.Equal(t, uint32(3), x.PartialTaskExecID.GetRetryAttempt())
assert.Equal(t, "o0", x.LiteralName)
})
@@ -210,11 +210,11 @@ func TestParseFlyteURLToExecution(t *testing.T) {
x, err := ParseFlyteURLToExecution("flyte://v1/fs/dev/abc/n0/3/o")
assert.NoError(t, err)
assert.Nil(t, x.NodeExecID)
- assert.Nil(t, x.PartialTaskExecID.TaskId)
- assert.Equal(t, "fs", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Project)
- assert.Equal(t, "dev", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Domain)
- assert.Equal(t, "abc", x.PartialTaskExecID.NodeExecutionId.ExecutionId.Name)
- assert.Equal(t, "n0", x.PartialTaskExecID.NodeExecutionId.NodeId)
+ assert.Nil(t, x.PartialTaskExecID.GetTaskId())
+ assert.Equal(t, "fs", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetProject())
+ assert.Equal(t, "dev", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetDomain())
+ assert.Equal(t, "abc", x.PartialTaskExecID.GetNodeExecutionId().GetExecutionId().GetName())
+ assert.Equal(t, "n0", x.PartialTaskExecID.GetNodeExecutionId().GetNodeId())
assert.Equal(t, uint32(3), x.PartialTaskExecID.GetRetryAttempt())
assert.Equal(t, "", x.LiteralName)
})
@@ -224,10 +224,10 @@ func TestParseFlyteURLToExecution(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, x.NodeExecID)
assert.Nil(t, x.PartialTaskExecID)
- assert.Equal(t, "fs", x.NodeExecID.ExecutionId.Project)
- assert.Equal(t, "dev", x.NodeExecID.ExecutionId.Domain)
- assert.Equal(t, "abc", x.NodeExecID.ExecutionId.Name)
- assert.Equal(t, "n0", x.NodeExecID.NodeId)
+ assert.Equal(t, "fs", x.NodeExecID.GetExecutionId().GetProject())
+ assert.Equal(t, "dev", x.NodeExecID.GetExecutionId().GetDomain())
+ assert.Equal(t, "abc", x.NodeExecID.GetExecutionId().GetName())
+ assert.Equal(t, "n0", x.NodeExecID.GetNodeId())
assert.Equal(t, "o0", x.LiteralName)
})
@@ -236,10 +236,10 @@ func TestParseFlyteURLToExecution(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, x.NodeExecID)
assert.Nil(t, x.PartialTaskExecID)
- assert.Equal(t, "fs", x.NodeExecID.ExecutionId.Project)
- assert.Equal(t, "dev", x.NodeExecID.ExecutionId.Domain)
- assert.Equal(t, "abc", x.NodeExecID.ExecutionId.Name)
- assert.Equal(t, "n0", x.NodeExecID.NodeId)
+ assert.Equal(t, "fs", x.NodeExecID.GetExecutionId().GetProject())
+ assert.Equal(t, "dev", x.NodeExecID.GetExecutionId().GetDomain())
+ assert.Equal(t, "abc", x.NodeExecID.GetExecutionId().GetName())
+ assert.Equal(t, "n0", x.NodeExecID.GetNodeId())
assert.Equal(t, "", x.LiteralName)
})
@@ -248,10 +248,10 @@ func TestParseFlyteURLToExecution(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, x.NodeExecID)
assert.Nil(t, x.PartialTaskExecID)
- assert.Equal(t, "fs", x.NodeExecID.ExecutionId.Project)
- assert.Equal(t, "dev", x.NodeExecID.ExecutionId.Domain)
- assert.Equal(t, "abc", x.NodeExecID.ExecutionId.Name)
- assert.Equal(t, "n0", x.NodeExecID.NodeId)
+ assert.Equal(t, "fs", x.NodeExecID.GetExecutionId().GetProject())
+ assert.Equal(t, "dev", x.NodeExecID.GetExecutionId().GetDomain())
+ assert.Equal(t, "abc", x.NodeExecID.GetExecutionId().GetName())
+ assert.Equal(t, "n0", x.NodeExecID.GetNodeId())
assert.Equal(t, "", x.LiteralName)
assert.Equal(t, ArtifactTypeI, x.IOType)
})
@@ -261,10 +261,10 @@ func TestParseFlyteURLToExecution(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, x.NodeExecID)
assert.Nil(t, x.PartialTaskExecID)
- assert.Equal(t, "fs", x.NodeExecID.ExecutionId.Project)
- assert.Equal(t, "dev", x.NodeExecID.ExecutionId.Domain)
- assert.Equal(t, "abc", x.NodeExecID.ExecutionId.Name)
- assert.Equal(t, "n0", x.NodeExecID.NodeId)
+ assert.Equal(t, "fs", x.NodeExecID.GetExecutionId().GetProject())
+ assert.Equal(t, "dev", x.NodeExecID.GetExecutionId().GetDomain())
+ assert.Equal(t, "abc", x.NodeExecID.GetExecutionId().GetName())
+ assert.Equal(t, "n0", x.NodeExecID.GetNodeId())
assert.Equal(t, "", x.LiteralName)
assert.Equal(t, ArtifactTypeD, x.IOType)
})
diff --git a/flyteadmin/pkg/common/sorting.go b/flyteadmin/pkg/common/sorting.go
index c89b86a914..246c73b52c 100644
--- a/flyteadmin/pkg/common/sorting.go
+++ b/flyteadmin/pkg/common/sorting.go
@@ -30,13 +30,13 @@ func NewSortParameter(sort *admin.Sort, allowed sets.String) (SortParameter, err
return nil, nil
}
- key := sort.Key
+ key := sort.GetKey()
if !allowed.Has(key) {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid sort key '%s'", key)
}
var gormOrderExpression string
- switch sort.Direction {
+ switch sort.GetDirection() {
case admin.Sort_DESCENDING:
gormOrderExpression = fmt.Sprintf(gormDescending, key)
case admin.Sort_ASCENDING:
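`NewSortParameter` checks the key against an allow-list and maps the proto sort direction onto a gorm ORDER BY fragment. A sketch of that mapping with plain types; the `%s desc` / `%s asc` formats are assumptions for `gormDescending`/`gormAscending`.

```go
package main

import "fmt"

type direction int

const (
	descending direction = iota
	ascending
)

// orderExpression mirrors the switch above, with plain types standing in for
// the admin.Sort proto.
func orderExpression(key string, dir direction, allowed map[string]bool) (string, error) {
	if !allowed[key] {
		return "", fmt.Errorf("invalid sort key '%s'", key)
	}
	switch dir {
	case descending:
		return fmt.Sprintf("%s desc", key), nil
	case ascending:
		return fmt.Sprintf("%s asc", key), nil
	default:
		return "", fmt.Errorf("unrecognized sort direction")
	}
}

func main() {
	allowed := map[string]bool{"created_at": true, "name": true}

	expr, _ := orderExpression("created_at", descending, allowed)
	fmt.Println(expr) // created_at desc

	if _, err := orderExpression("updated_at", ascending, allowed); err != nil {
		fmt.Println(err) // invalid sort key 'updated_at'
	}
}
```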
diff --git a/flyteadmin/pkg/data/implementations/aws_remote_url_test.go b/flyteadmin/pkg/data/implementations/aws_remote_url_test.go
index 878351fcf2..0677a498a3 100644
--- a/flyteadmin/pkg/data/implementations/aws_remote_url_test.go
+++ b/flyteadmin/pkg/data/implementations/aws_remote_url_test.go
@@ -80,6 +80,6 @@ func TestAWSGet(t *testing.T) {
}
urlBlob, err := remoteURL.Get(context.Background(), "s3://bucket/key")
assert.Nil(t, err)
- assert.Equal(t, "www://host/path", urlBlob.Url)
- assert.Equal(t, contentLength, urlBlob.Bytes)
+ assert.Equal(t, "www://host/path", urlBlob.GetUrl())
+ assert.Equal(t, contentLength, urlBlob.GetBytes())
}
diff --git a/flyteadmin/pkg/data/implementations/gcp_remote_url.go b/flyteadmin/pkg/data/implementations/gcp_remote_url.go
index 3a8dc98679..56fe7412e7 100644
--- a/flyteadmin/pkg/data/implementations/gcp_remote_url.go
+++ b/flyteadmin/pkg/data/implementations/gcp_remote_url.go
@@ -110,7 +110,7 @@ func (g *GCPRemoteURL) signURL(ctx context.Context, gcsURI GCPGCSObject) (string
if err != nil {
return nil, err
}
- return resp.SignedBlob, nil
+ return resp.GetSignedBlob(), nil
},
Expires: time.Now().Add(g.signDuration),
}
@@ -159,8 +159,8 @@ func (ts impersonationTokenSource) Token() (*oauth2.Token, error) {
}
return &oauth2.Token{
- AccessToken: resp.AccessToken,
- Expiry: asTime(resp.ExpireTime),
+ AccessToken: resp.GetAccessToken(),
+ Expiry: asTime(resp.GetExpireTime()),
}, nil
}
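`impersonationTokenSource` adapts a short-lived IAM-credentials access token into the `oauth2.TokenSource` the GCS client expects. A sketch of that adaptation with a stubbed token generator in place of the real IAM Credentials call:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

// generated mimics the relevant fields of a GenerateAccessToken response.
type generated struct {
	accessToken string
	expireTime  time.Time
}

// stubSource converts whatever the credential backend returned into the
// *oauth2.Token the GCS client consumes.
type stubSource struct {
	generate func() (generated, error)
}

var _ oauth2.TokenSource = stubSource{}

func (s stubSource) Token() (*oauth2.Token, error) {
	resp, err := s.generate()
	if err != nil {
		return nil, err
	}
	return &oauth2.Token{AccessToken: resp.accessToken, Expiry: resp.expireTime}, nil
}

func main() {
	src := stubSource{generate: func() (generated, error) {
		return generated{accessToken: "ya29.stub", expireTime: time.Now().Add(time.Hour)}, nil
	}}
	tok, _ := src.Token()
	fmt.Println(tok.Valid()) // true while unexpired
}
```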
diff --git a/flyteadmin/pkg/data/implementations/gcp_remote_url_test.go b/flyteadmin/pkg/data/implementations/gcp_remote_url_test.go
index cfcce1ff5a..a1156518e6 100644
--- a/flyteadmin/pkg/data/implementations/gcp_remote_url_test.go
+++ b/flyteadmin/pkg/data/implementations/gcp_remote_url_test.go
@@ -88,7 +88,7 @@ func TestGCPGet(t *testing.T) {
mockIAMCredentials := mockIAMCredentialsImpl{}
mockIAMCredentials.signBlobFunc = func(ctx context.Context, req *credentialspb.SignBlobRequest, opts ...gax.CallOption) (*credentialspb.SignBlobResponse, error) {
- assert.Equal(t, "projects/-/serviceAccounts/"+signingPrincipal, req.Name)
+ assert.Equal(t, "projects/-/serviceAccounts/"+signingPrincipal, req.GetName())
return &credentialspb.SignBlobResponse{SignedBlob: []byte(signedBlob)}, nil
}
@@ -102,12 +102,12 @@ func TestGCPGet(t *testing.T) {
urlBlob, err := remoteURL.Get(context.Background(), "gs://bucket/key")
assert.Nil(t, err)
- u, _ := url.Parse(urlBlob.Url)
+ u, _ := url.Parse(urlBlob.GetUrl())
assert.Equal(t, "https", u.Scheme)
assert.Equal(t, "storage.googleapis.com", u.Hostname())
assert.Equal(t, "/bucket/key", u.Path)
assert.Equal(t, encodedSignedBlob, u.Query().Get("Signature"))
- assert.Equal(t, int64(100), urlBlob.Bytes)
+ assert.Equal(t, int64(100), urlBlob.GetBytes())
}
func TestToken(t *testing.T) {
@@ -117,8 +117,8 @@ func TestToken(t *testing.T) {
mockIAMCredentials := mockIAMCredentialsImpl{}
mockIAMCredentials.generateAccessTokenFunc = func(ctx context.Context, req *credentialspb.GenerateAccessTokenRequest, opts ...gax.CallOption) (*credentialspb.GenerateAccessTokenResponse, error) {
- assert.Equal(t, "projects/-/serviceAccounts/"+signingPrincipal, req.Name)
- assert.Equal(t, []string{"https://www.googleapis.com/auth/devstorage.read_only"}, req.Scope)
+ assert.Equal(t, "projects/-/serviceAccounts/"+signingPrincipal, req.GetName())
+ assert.Equal(t, []string{"https://www.googleapis.com/auth/devstorage.read_only"}, req.GetScope())
return &credentialspb.GenerateAccessTokenResponse{
AccessToken: token,
ExpireTime: &timestamp,
diff --git a/flyteadmin/pkg/data/implementations/noop_remote_url_test.go b/flyteadmin/pkg/data/implementations/noop_remote_url_test.go
index 965dc9eeb2..c4e14a394a 100644
--- a/flyteadmin/pkg/data/implementations/noop_remote_url_test.go
+++ b/flyteadmin/pkg/data/implementations/noop_remote_url_test.go
@@ -44,6 +44,6 @@ func TestNoopRemoteURLGet(t *testing.T) {
urlBlob, err := noopRemoteURL.Get(context.Background(), "uri")
assert.Nil(t, err)
assert.NotEmpty(t, urlBlob)
- assert.Equal(t, "uri", urlBlob.Url)
- assert.Equal(t, noopFileSize, urlBlob.Bytes)
+ assert.Equal(t, "uri", urlBlob.GetUrl())
+ assert.Equal(t, noopFileSize, urlBlob.GetBytes())
}
diff --git a/flyteadmin/pkg/errors/errors.go b/flyteadmin/pkg/errors/errors.go
index 5fc48b0b67..8e280e11dd 100644
--- a/flyteadmin/pkg/errors/errors.go
+++ b/flyteadmin/pkg/errors/errors.go
@@ -91,7 +91,7 @@ func NewAlreadyInTerminalStateError(ctx context.Context, errorMsg string, curPha
statusErr, transformationErr := NewFlyteAdminError(codes.FailedPrecondition, errorMsg).WithDetails(reason)
if transformationErr != nil {
logger.Panicf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr)
- return NewFlyteAdminErrorf(codes.FailedPrecondition, errorMsg)
+ return NewFlyteAdminErrorf(codes.FailedPrecondition, errorMsg) //nolint
}
return statusErr
}
@@ -105,8 +105,8 @@ func NewIncompatibleClusterError(ctx context.Context, errorMsg, curCluster strin
},
})
if transformationErr != nil {
- logger.Panicf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr)
- return NewFlyteAdminErrorf(codes.FailedPrecondition, errorMsg)
+ logger.Panicf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr) //nolint
+ return NewFlyteAdminErrorf(codes.FailedPrecondition, errorMsg) //nolint
}
return statusErr
}
@@ -128,23 +128,23 @@ func compareJsons(jsonArray1 jsondiff.Patch, jsonArray2 jsondiff.Patch) []string
}
func NewTaskExistsDifferentStructureError(ctx context.Context, request *admin.TaskCreateRequest, oldSpec *core.CompiledTask, newSpec *core.CompiledTask) FlyteAdminError {
- errorMsg := fmt.Sprintf("%v task with different structure already exists. (Please register a new version of the task):\n", request.Id.Name)
+ errorMsg := fmt.Sprintf("%v task with different structure already exists. (Please register a new version of the task):\n", request.GetId().GetName())
diff, _ := jsondiff.Compare(oldSpec, newSpec)
rdiff, _ := jsondiff.Compare(newSpec, oldSpec)
rs := compareJsons(diff, rdiff)
errorMsg += strings.Join(rs, "\n")
- return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg)
+ return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) //nolint
}
func NewTaskExistsIdenticalStructureError(ctx context.Context, request *admin.TaskCreateRequest) FlyteAdminError {
errorMsg := "task with identical structure already exists"
- return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg)
+ return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) //nolint
}
func NewWorkflowExistsDifferentStructureError(ctx context.Context, request *admin.WorkflowCreateRequest, oldSpec *core.CompiledWorkflowClosure, newSpec *core.CompiledWorkflowClosure) FlyteAdminError {
- errorMsg := fmt.Sprintf("%v workflow with different structure already exists. (Please register a new version of the workflow):\n", request.Id.Name)
+ errorMsg := fmt.Sprintf("%v workflow with different structure already exists. (Please register a new version of the workflow):\n", request.GetId().GetName())
diff, _ := jsondiff.Compare(oldSpec, newSpec)
rdiff, _ := jsondiff.Compare(newSpec, oldSpec)
rs := compareJsons(diff, rdiff)
@@ -154,13 +154,13 @@ func NewWorkflowExistsDifferentStructureError(ctx context.Context, request *admi
statusErr, transformationErr := NewFlyteAdminError(codes.InvalidArgument, errorMsg).WithDetails(&admin.CreateWorkflowFailureReason{
Reason: &admin.CreateWorkflowFailureReason_ExistsDifferentStructure{
ExistsDifferentStructure: &admin.WorkflowErrorExistsDifferentStructure{
- Id: request.Id,
+ Id: request.GetId(),
},
},
})
if transformationErr != nil {
logger.Errorf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr)
- return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg)
+ return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) //nolint
}
return statusErr
}
@@ -170,31 +170,31 @@ func NewWorkflowExistsIdenticalStructureError(ctx context.Context, request *admi
statusErr, transformationErr := NewFlyteAdminError(codes.AlreadyExists, errorMsg).WithDetails(&admin.CreateWorkflowFailureReason{
Reason: &admin.CreateWorkflowFailureReason_ExistsIdenticalStructure{
ExistsIdenticalStructure: &admin.WorkflowErrorExistsIdenticalStructure{
- Id: request.Id,
+ Id: request.GetId(),
},
},
})
if transformationErr != nil {
logger.Errorf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr)
- return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg)
+ return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) //nolint
}
return statusErr
}
func NewLaunchPlanExistsDifferentStructureError(ctx context.Context, request *admin.LaunchPlanCreateRequest, oldSpec *admin.LaunchPlanSpec, newSpec *admin.LaunchPlanSpec) FlyteAdminError {
- errorMsg := fmt.Sprintf("%v launch plan with different structure already exists. (Please register a new version of the launch plan):\n", request.Id.Name)
+ errorMsg := fmt.Sprintf("%v launch plan with different structure already exists. (Please register a new version of the launch plan):\n", request.GetId().GetName())
diff, _ := jsondiff.Compare(oldSpec, newSpec)
rdiff, _ := jsondiff.Compare(newSpec, oldSpec)
rs := compareJsons(diff, rdiff)
errorMsg += strings.Join(rs, "\n")
- return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg)
+ return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) //nolint
}
func NewLaunchPlanExistsIdenticalStructureError(ctx context.Context, request *admin.LaunchPlanCreateRequest) FlyteAdminError {
errorMsg := "launch plan with identical structure already exists"
- return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg)
+ return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) //nolint
}
func IsDoesNotExistError(err error) bool {
@@ -209,12 +209,12 @@ func NewInactiveProjectError(ctx context.Context, id string) FlyteAdminError {
})
if transformationErr != nil {
logger.Errorf(ctx, "failed to wrap grpc status in type 'Error': %v", transformationErr)
- return NewFlyteAdminErrorf(codes.InvalidArgument, errMsg)
+ return NewFlyteAdminErrorf(codes.InvalidArgument, errMsg) //nolint
}
return statusErr
}
func NewInvalidLiteralTypeError(name string, err error) FlyteAdminError {
return NewFlyteAdminErrorf(codes.InvalidArgument,
- fmt.Sprintf("Failed to validate literal type for [%s] with err: %s", name, err))
+ fmt.Sprintf("Failed to validate literal type for [%s] with err: %s", name, err)) //nolint
}
diff --git a/flyteadmin/pkg/errors/errors_test.go b/flyteadmin/pkg/errors/errors_test.go
index 18c76992b5..a72b4ce2eb 100644
--- a/flyteadmin/pkg/errors/errors_test.go
+++ b/flyteadmin/pkg/errors/errors_test.go
@@ -284,7 +284,7 @@ func TestNewLaunchPlanExistsDifferentStructureError(t *testing.T) {
Id: &identifier,
}
- statusErr := NewLaunchPlanExistsDifferentStructureError(context.Background(), req, oldLaunchPlan.Spec, newLaunchPlan.Spec)
+ statusErr := NewLaunchPlanExistsDifferentStructureError(context.Background(), req, oldLaunchPlan.GetSpec(), newLaunchPlan.GetSpec())
assert.NotNil(t, statusErr)
s, ok := status.FromError(statusErr)
assert.True(t, ok)
@@ -325,5 +325,5 @@ func TestNewInactiveProjectError(t *testing.T) {
details, ok := statusErr.Details()[0].(*admin.InactiveProject)
assert.True(t, ok)
- assert.Equal(t, identifier.GetProject(), details.Id)
+ assert.Equal(t, identifier.GetProject(), details.GetId())
}
diff --git a/flyteadmin/pkg/executioncluster/impl/in_cluster.go b/flyteadmin/pkg/executioncluster/impl/in_cluster.go
index f06d1c4adf..2fdd8271e1 100644
--- a/flyteadmin/pkg/executioncluster/impl/in_cluster.go
+++ b/flyteadmin/pkg/executioncluster/impl/in_cluster.go
@@ -26,8 +26,8 @@ func (i InCluster) GetTarget(ctx context.Context, spec *executioncluster.Executi
if spec != nil && !(spec.TargetID == "" || spec.TargetID == defaultInClusterTargetID) {
return nil, errors.New(fmt.Sprintf("remote target %s is not supported", spec.TargetID))
}
- if spec != nil && spec.ExecutionClusterLabel != nil && spec.ExecutionClusterLabel.Value != "" {
- return nil, errors.New(fmt.Sprintf("execution cluster label %s is not supported", spec.ExecutionClusterLabel.Value))
+ if spec != nil && spec.ExecutionClusterLabel != nil && spec.ExecutionClusterLabel.GetValue() != "" {
+ return nil, errors.New(fmt.Sprintf("execution cluster label %s is not supported", spec.ExecutionClusterLabel.GetValue()))
}
return &i.target, nil
}
diff --git a/flyteadmin/pkg/executioncluster/impl/random_cluster_selector.go b/flyteadmin/pkg/executioncluster/impl/random_cluster_selector.go
index 35340d3822..e4c2149220 100644
--- a/flyteadmin/pkg/executioncluster/impl/random_cluster_selector.go
+++ b/flyteadmin/pkg/executioncluster/impl/random_cluster_selector.go
@@ -34,7 +34,7 @@ func getRandSource(seed string) (rand.Source, error) {
if err != nil {
return nil, err
}
- hashedSeed := int64(h.Sum64())
+ hashedSeed := int64(h.Sum64()) // #nosec G115
return rand.NewSource(hashedSeed), nil
}
@@ -98,8 +98,8 @@ func (s RandomClusterSelector) GetTarget(ctx context.Context, spec *executionclu
var label string
- if spec.ExecutionClusterLabel != nil && spec.ExecutionClusterLabel.Value != "" {
- label = spec.ExecutionClusterLabel.Value
+ if spec.ExecutionClusterLabel != nil && spec.ExecutionClusterLabel.GetValue() != "" {
+ label = spec.ExecutionClusterLabel.GetValue()
logger.Debugf(ctx, "Using execution cluster label %s", label)
} else {
resource, err := s.resourceManager.GetResource(ctx, managerInterfaces.ResourceRequest{
@@ -113,7 +113,7 @@ func (s RandomClusterSelector) GetTarget(ctx context.Context, spec *executionclu
return nil, err
}
if resource != nil && resource.Attributes.GetExecutionClusterLabel() != nil {
- label = resource.Attributes.GetExecutionClusterLabel().Value
+ label = resource.Attributes.GetExecutionClusterLabel().GetValue()
}
}
diff --git a/flyteadmin/pkg/manager/impl/description_entity_manager.go b/flyteadmin/pkg/manager/impl/description_entity_manager.go
index a7affd5e88..7a2fdd239c 100644
--- a/flyteadmin/pkg/manager/impl/description_entity_manager.go
+++ b/flyteadmin/pkg/manager/impl/description_entity_manager.go
@@ -38,8 +38,8 @@ func (d *DescriptionEntityManager) GetDescriptionEntity(ctx context.Context, req
logger.Errorf(ctx, "invalid request [%+v]: %v", request, err)
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
- return util.GetDescriptionEntity(ctx, d.db, request.Id)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain())
+ return util.GetDescriptionEntity(ctx, d.db, request.GetId())
}
func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, request *admin.DescriptionEntityListRequest) (*admin.DescriptionEntityList, error) {
@@ -47,44 +47,44 @@ func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, re
if err := validation.ValidateDescriptionEntityListRequest(request); err != nil {
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain())
- if request.ResourceType == core.ResourceType_WORKFLOW {
- ctx = contextutils.WithWorkflowID(ctx, request.Id.Name)
+ if request.GetResourceType() == core.ResourceType_WORKFLOW {
+ ctx = contextutils.WithWorkflowID(ctx, request.GetId().GetName())
} else {
- ctx = contextutils.WithTaskID(ctx, request.Id.Name)
+ ctx = contextutils.WithTaskID(ctx, request.GetId().GetName())
}
filters, err := util.GetDbFilters(util.FilterSpec{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
- RequestFilters: request.Filters,
- }, common.ResourceTypeToEntity[request.ResourceType])
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
+ RequestFilters: request.GetFilters(),
+ }, common.ResourceTypeToEntity[request.GetResourceType()])
if err != nil {
logger.Error(ctx, "failed to get database filter")
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.DescriptionEntityColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.DescriptionEntityColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListWorkflows", request.Token)
+ "invalid pagination token %s for ListWorkflows", request.GetToken())
}
listDescriptionEntitiesInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
}
output, err := d.db.DescriptionEntityRepo().List(ctx, listDescriptionEntitiesInput)
if err != nil {
- logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.GetId(), err)
return nil, err
}
descriptionEntityList, err := transformers.FromDescriptionEntityModels(output.Entities)
@@ -94,7 +94,7 @@ func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, re
return nil, err
}
var token string
- if len(output.Entities) == int(request.Limit) {
+ if len(output.Entities) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(output.Entities))
}
return &admin.DescriptionEntityList{
diff --git a/flyteadmin/pkg/manager/impl/execution_manager.go b/flyteadmin/pkg/manager/impl/execution_manager.go
index e700a744d8..fd8f0870f1 100644
--- a/flyteadmin/pkg/manager/impl/execution_manager.go
+++ b/flyteadmin/pkg/manager/impl/execution_manager.go
@@ -95,8 +95,8 @@ type ExecutionManager struct {
}
func getExecutionContext(ctx context.Context, id *core.WorkflowExecutionIdentifier) context.Context {
- ctx = contextutils.WithExecutionID(ctx, id.Name)
- return contextutils.WithProjectDomain(ctx, id.Project, id.Domain)
+ ctx = contextutils.WithExecutionID(ctx, id.GetName())
+ return contextutils.WithProjectDomain(ctx, id.GetProject(), id.GetDomain())
}
// Returns the unique string which identifies the authenticated end user (if any).
@@ -108,16 +108,16 @@ func getUser(ctx context.Context) string {
func (m *ExecutionManager) populateExecutionQueue(
ctx context.Context, identifier *core.Identifier, compiledWorkflow *core.CompiledWorkflowClosure) {
queueConfig := m.queueAllocator.GetQueue(ctx, identifier)
- for _, task := range compiledWorkflow.Tasks {
- container := task.Template.GetContainer()
+ for _, task := range compiledWorkflow.GetTasks() {
+ container := task.GetTemplate().GetContainer()
if container == nil {
// Unrecognized target type, nothing to do
continue
}
if queueConfig.DynamicQueue != "" {
- logger.Debugf(ctx, "Assigning %s as child queue for task %+v", queueConfig.DynamicQueue, task.Template.Id)
- container.Config = append(container.Config, &core.KeyValuePair{
+ logger.Debugf(ctx, "Assigning %s as child queue for task %+v", queueConfig.DynamicQueue, task.GetTemplate().GetId())
+ container.Config = append(container.GetConfig(), &core.KeyValuePair{
Key: childContainerQueueKey,
Value: queueConfig.DynamicQueue,
})
@@ -159,8 +159,8 @@ func resolveStringMap(preferredValues, defaultValues mapWithValues, valueName st
func (m *ExecutionManager) addPluginOverrides(ctx context.Context, executionID *core.WorkflowExecutionIdentifier,
workflowName, launchPlanName string) ([]*admin.PluginOverride, error) {
override, err := m.resourceManager.GetResource(ctx, interfaces.ResourceRequest{
- Project: executionID.Project,
- Domain: executionID.Domain,
+ Project: executionID.GetProject(),
+ Domain: executionID.GetDomain(),
Workflow: workflowName,
LaunchPlan: launchPlanName,
ResourceType: admin.MatchableResource_PLUGIN_OVERRIDE,
@@ -169,7 +169,7 @@ func (m *ExecutionManager) addPluginOverrides(ctx context.Context, executionID *
return nil, err
}
if override != nil && override.Attributes != nil && override.Attributes.GetPluginOverrides() != nil {
- return override.Attributes.GetPluginOverrides().Overrides, nil
+ return override.Attributes.GetPluginOverrides().GetOverrides(), nil
}
return nil, nil
}
@@ -188,13 +188,13 @@ func (m *ExecutionManager) setCompiledTaskDefaults(ctx context.Context, task *co
return
}
- if task.Template == nil || task.Template.GetContainer() == nil {
+ if task.GetTemplate() == nil || task.GetTemplate().GetContainer() == nil {
// Nothing to do
logger.Debugf(ctx, "Not setting default resources for task [%+v], no container resources found to check", task)
return
}
- if task.Template.GetContainer().Resources == nil {
+ if task.GetTemplate().GetContainer().GetResources() == nil {
// In case of no resources on the container, create empty requests and limits
// so the container will still have resources configure properly
task.Template.GetContainer().Resources = &core.Resources{
@@ -209,7 +209,7 @@ func (m *ExecutionManager) setCompiledTaskDefaults(ctx context.Context, task *co
// The IDL representation for container-type tasks represents resources as a list with string quantities.
// In order to easily reason about them we convert them to a set where we can O(1) fetch specific resources (e.g. CPU)
// and represent them as comparable quantities rather than strings.
- taskResourceRequirements := util.GetCompleteTaskResourceRequirements(ctx, task.Template.Id, task)
+ taskResourceRequirements := util.GetCompleteTaskResourceRequirements(ctx, task.GetTemplate().GetId(), task)
cpu := flytek8s.AdjustOrDefaultResource(taskResourceRequirements.Defaults.CPU, taskResourceRequirements.Limits.CPU,
platformTaskResources.Defaults.CPU, platformTaskResources.Limits.CPU)
@@ -276,22 +276,22 @@ func (m *ExecutionManager) setCompiledTaskDefaults(ctx context.Context, task *co
// as well as sets request spec metadata with the inherited principal and adjusted nesting data.
func (m *ExecutionManager) getInheritedExecMetadata(ctx context.Context, requestSpec *admin.ExecutionSpec,
workflowExecutionID *core.WorkflowExecutionIdentifier) (parentNodeExecutionID uint, sourceExecutionID uint, err error) {
- if requestSpec.Metadata == nil || requestSpec.Metadata.ParentNodeExecution == nil {
+ if requestSpec.GetMetadata() == nil || requestSpec.GetMetadata().GetParentNodeExecution() == nil {
return parentNodeExecutionID, sourceExecutionID, nil
}
- parentNodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, requestSpec.Metadata.ParentNodeExecution)
+ parentNodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, requestSpec.GetMetadata().GetParentNodeExecution())
if err != nil {
logger.Errorf(ctx, "Failed to get node execution [%+v] that launched this execution [%+v] with error %v",
- requestSpec.Metadata.ParentNodeExecution, workflowExecutionID, err)
+ requestSpec.GetMetadata().GetParentNodeExecution(), workflowExecutionID, err)
return parentNodeExecutionID, sourceExecutionID, err
}
parentNodeExecutionID = parentNodeExecutionModel.ID
- sourceExecutionModel, err := util.GetExecutionModel(ctx, m.db, requestSpec.Metadata.ParentNodeExecution.ExecutionId)
+ sourceExecutionModel, err := util.GetExecutionModel(ctx, m.db, requestSpec.GetMetadata().GetParentNodeExecution().GetExecutionId())
if err != nil {
logger.Errorf(ctx, "Failed to get workflow execution [%+v] that launched this execution [%+v] with error %v",
- requestSpec.Metadata.ParentNodeExecution, workflowExecutionID, err)
+ requestSpec.GetMetadata().GetParentNodeExecution(), workflowExecutionID, err)
return parentNodeExecutionID, sourceExecutionID, err
}
sourceExecutionID = sourceExecutionModel.ID
@@ -301,16 +301,16 @@ func (m *ExecutionManager) getInheritedExecMetadata(ctx context.Context, request
logger.Errorf(ctx, "Failed transform parent execution model for child execution [%+v] with err: %v", workflowExecutionID, err)
return parentNodeExecutionID, sourceExecutionID, err
}
- if sourceExecution.Spec.Metadata != nil {
- requestSpec.Metadata.Nesting = sourceExecution.Spec.Metadata.Nesting + 1
+ if sourceExecution.GetSpec().GetMetadata() != nil {
+ requestSpec.Metadata.Nesting = sourceExecution.GetSpec().GetMetadata().GetNesting() + 1
} else {
requestSpec.Metadata.Nesting = 1
}
// If the source execution has a cluster label, inherit it.
- if sourceExecution.Spec.ExecutionClusterLabel != nil {
- logger.Infof(ctx, "Inherited execution label from source execution [%+v]", sourceExecution.Spec.ExecutionClusterLabel.Value)
- requestSpec.ExecutionClusterLabel = sourceExecution.Spec.ExecutionClusterLabel
+ if sourceExecution.GetSpec().GetExecutionClusterLabel() != nil {
+ logger.Infof(ctx, "Inherited execution label from source execution [%+v]", sourceExecution.GetSpec().GetExecutionClusterLabel().GetValue())
+ requestSpec.ExecutionClusterLabel = sourceExecution.GetSpec().GetExecutionClusterLabel()
}
return parentNodeExecutionID, sourceExecutionID, nil
}
@@ -324,20 +324,20 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi
workflowExecConfig := &admin.WorkflowExecutionConfig{}
// Merge the request spec into workflowExecConfig
- workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, request.Spec)
+ workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, request.GetSpec())
var workflowName string
- if launchPlan != nil && launchPlan.Spec != nil {
+ if launchPlan != nil && launchPlan.GetSpec() != nil {
// Merge the launch plan spec into workflowExecConfig
- workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, launchPlan.Spec)
- if launchPlan.Spec.WorkflowId != nil {
- workflowName = launchPlan.Spec.WorkflowId.Name
+ workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, launchPlan.GetSpec())
+ if launchPlan.GetSpec().GetWorkflowId() != nil {
+ workflowName = launchPlan.GetSpec().GetWorkflowId().GetName()
}
}
// This will get the most specific Workflow Execution Config.
matchableResource, err := util.GetMatchableResource(ctx, m.resourceManager,
- admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, request.Project, request.Domain, workflowName)
+ admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, request.GetProject(), request.GetDomain(), workflowName)
if err != nil {
return nil, err
}
@@ -356,7 +356,7 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi
// system level defaults for the rest.
// See FLYTE-2322 for more background information.
projectMatchableResource, err := util.GetMatchableResource(ctx, m.resourceManager,
- admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, request.Project, "", "")
+ admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, request.GetProject(), "", "")
if err != nil {
return nil, err
}
@@ -404,7 +404,7 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi
}
func (m *ExecutionManager) getClusterAssignment(ctx context.Context, req *admin.ExecutionCreateRequest) (*admin.ClusterAssignment, error) {
- storedAssignment, err := m.fetchClusterAssignment(ctx, req.Project, req.Domain)
+ storedAssignment, err := m.fetchClusterAssignment(ctx, req.GetProject(), req.GetDomain())
if err != nil {
return nil, err
}
@@ -421,7 +421,7 @@ func (m *ExecutionManager) getClusterAssignment(ctx context.Context, req *admin.
}
if reqPool != storedPool {
- return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "execution with project %q and domain %q cannot run on cluster pool %q, because its configured to run on pool %q", req.Project, req.Domain, reqPool, storedPool)
+ return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "execution with project %q and domain %q cannot run on cluster pool %q, because its configured to run on pool %q", req.GetProject(), req.GetDomain(), reqPool, storedPool)
}
return storedAssignment, nil
@@ -454,10 +454,10 @@ func (m *ExecutionManager) launchSingleTaskExecution(
context.Context, *models.Execution, error) {
taskModel, err := m.db.TaskRepo().Get(ctx, repositoryInterfaces.Identifier{
- Project: request.Spec.LaunchPlan.Project,
- Domain: request.Spec.LaunchPlan.Domain,
- Name: request.Spec.LaunchPlan.Name,
- Version: request.Spec.LaunchPlan.Version,
+ Project: request.GetSpec().GetLaunchPlan().GetProject(),
+ Domain: request.GetSpec().GetLaunchPlan().GetDomain(),
+ Name: request.GetSpec().GetLaunchPlan().GetName(),
+ Version: request.GetSpec().GetLaunchPlan().GetVersion(),
})
if err != nil {
return nil, nil, err
@@ -468,7 +468,7 @@ func (m *ExecutionManager) launchSingleTaskExecution(
}
// Prepare a skeleton workflow and launch plan
- taskIdentifier := request.Spec.LaunchPlan
+ taskIdentifier := request.GetSpec().GetLaunchPlan()
workflowModel, err :=
util.CreateOrGetWorkflowModel(ctx, request, m.db, m.workflowManager, m.namedEntityManager, taskIdentifier, &task)
if err != nil {
@@ -481,27 +481,27 @@ func (m *ExecutionManager) launchSingleTaskExecution(
}
launchPlan, err := util.CreateOrGetLaunchPlan(ctx, m.db, m.config, m.namedEntityManager, taskIdentifier,
- workflow.Closure.CompiledWorkflow.Primary.Template.Interface, workflowModel.ID, request.Spec)
+ workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface(), workflowModel.ID, request.GetSpec())
if err != nil {
return nil, nil, err
}
executionInputs, err := validation.CheckAndFetchInputsForExecution(
- request.Inputs,
- launchPlan.Spec.FixedInputs,
- launchPlan.Closure.ExpectedInputs,
+ request.GetInputs(),
+ launchPlan.GetSpec().GetFixedInputs(),
+ launchPlan.GetClosure().GetExpectedInputs(),
)
if err != nil {
logger.Debugf(ctx, "Failed to CheckAndFetchInputsForExecution with request.Inputs: %+v"+
"fixed inputs: %+v and expected inputs: %+v with err %v",
- request.Inputs, launchPlan.Spec.FixedInputs, launchPlan.Closure.ExpectedInputs, err)
+ request.GetInputs(), launchPlan.GetSpec().GetFixedInputs(), launchPlan.GetClosure().GetExpectedInputs(), err)
return nil, nil, err
}
name := util.GetExecutionName(request)
workflowExecutionID := &core.WorkflowExecutionIdentifier{
- Project: request.Project,
- Domain: request.Domain,
+ Project: request.GetProject(),
+ Domain: request.GetDomain(),
Name: name,
}
@@ -519,15 +519,15 @@ func (m *ExecutionManager) launchSingleTaskExecution(
offloadInputsGroup.Go(func() error {
var err error
inputsURI, err = common.OffloadLiteralMap(offloadInputsGroupCtx, m.storageClient, executionInputs, // or request.Inputs?
- workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs)
+ workflowExecutionID.GetProject(), workflowExecutionID.GetDomain(), workflowExecutionID.GetName(), shared.Inputs)
return err
})
var userInputsURI storage.DataReference
offloadInputsGroup.Go(func() error {
var err error
- userInputsURI, err = common.OffloadLiteralMap(offloadInputsGroupCtx, m.storageClient, request.Inputs,
- workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.UserInputs)
+ userInputsURI, err = common.OffloadLiteralMap(offloadInputsGroupCtx, m.storageClient, request.GetInputs(),
+ workflowExecutionID.GetProject(), workflowExecutionID.GetDomain(), workflowExecutionID.GetName(), shared.UserInputs)
return err
})
@@ -535,15 +535,15 @@ func (m *ExecutionManager) launchSingleTaskExecution(
if err != nil {
return nil, nil, err
}
- closure.CreatedAt = workflow.Closure.CreatedAt
+ closure.CreatedAt = workflow.GetClosure().GetCreatedAt()
workflow.Closure = closure
ctx = getExecutionContext(ctx, workflowExecutionID)
namespace := common.GetNamespaceName(
- m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.Project, workflowExecutionID.Domain)
+ m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.GetProject(), workflowExecutionID.GetDomain())
- requestSpec := request.Spec
- if requestSpec.Metadata == nil {
+ requestSpec := request.GetSpec()
+ if requestSpec.GetMetadata() == nil {
requestSpec.Metadata = &admin.ExecutionMetadata{}
}
requestSpec.Metadata.Principal = getUser(ctx)
@@ -557,13 +557,13 @@ func (m *ExecutionManager) launchSingleTaskExecution(
}
// Dynamically assign task resource defaults.
- platformTaskResources := util.GetTaskResources(ctx, workflow.Id, m.resourceManager, m.config.TaskResourceConfiguration())
- for _, t := range workflow.Closure.CompiledWorkflow.Tasks {
+ platformTaskResources := util.GetTaskResources(ctx, workflow.GetId(), m.resourceManager, m.config.TaskResourceConfiguration())
+ for _, t := range workflow.GetClosure().GetCompiledWorkflow().GetTasks() {
m.setCompiledTaskDefaults(ctx, t, platformTaskResources)
}
// Dynamically assign execution queues.
- m.populateExecutionQueue(ctx, workflow.Id, workflow.Closure.CompiledWorkflow)
+ m.populateExecutionQueue(ctx, workflow.GetId(), workflow.GetClosure().GetCompiledWorkflow())
executionConfig, err := m.getExecutionConfig(ctx, request, nil)
if err != nil {
@@ -571,23 +571,23 @@ func (m *ExecutionManager) launchSingleTaskExecution(
}
var labels map[string]string
- if executionConfig.Labels != nil {
- labels = executionConfig.Labels.Values
+ if executionConfig.GetLabels() != nil {
+ labels = executionConfig.GetLabels().GetValues()
}
- labels, err = m.addProjectLabels(ctx, request.Project, labels)
+ labels, err = m.addProjectLabels(ctx, request.GetProject(), labels)
if err != nil {
return nil, nil, err
}
var annotations map[string]string
- if executionConfig.Annotations != nil {
- annotations = executionConfig.Annotations.Values
+ if executionConfig.GetAnnotations() != nil {
+ annotations = executionConfig.GetAnnotations().GetValues()
}
var rawOutputDataConfig *admin.RawOutputDataConfig
- if executionConfig.RawOutputDataConfig != nil {
- rawOutputDataConfig = executionConfig.RawOutputDataConfig
+ if executionConfig.GetRawOutputDataConfig() != nil {
+ rawOutputDataConfig = executionConfig.GetRawOutputDataConfig()
}
clusterAssignment, err := m.getClusterAssignment(ctx, request)
@@ -596,8 +596,8 @@ func (m *ExecutionManager) launchSingleTaskExecution(
}
var executionClusterLabel *admin.ExecutionClusterLabel
- if requestSpec.ExecutionClusterLabel != nil {
- executionClusterLabel = requestSpec.ExecutionClusterLabel
+ if requestSpec.GetExecutionClusterLabel() != nil {
+ executionClusterLabel = requestSpec.GetExecutionClusterLabel()
}
executionParameters := workflowengineInterfaces.ExecutionParameters{
Inputs: executionInputs,
@@ -613,16 +613,16 @@ func (m *ExecutionManager) launchSingleTaskExecution(
ExecutionClusterLabel: executionClusterLabel,
}
- overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, workflowExecutionID.Name, "")
+ overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, workflowExecutionID.GetName(), "")
if err != nil {
return nil, nil, err
}
if overrides != nil {
executionParameters.TaskPluginOverrides = overrides
}
- if request.Spec.Metadata != nil && request.Spec.Metadata.ReferenceExecution != nil &&
- request.Spec.Metadata.Mode == admin.ExecutionMetadata_RECOVERED {
- executionParameters.RecoveryExecution = request.Spec.Metadata.ReferenceExecution
+ if request.GetSpec().GetMetadata() != nil && request.GetSpec().GetMetadata().GetReferenceExecution() != nil &&
+ request.GetSpec().GetMetadata().GetMode() == admin.ExecutionMetadata_RECOVERED {
+ executionParameters.RecoveryExecution = request.GetSpec().GetMetadata().GetReferenceExecution()
}
err = offloadInputsGroup.Wait()
@@ -634,9 +634,9 @@ func (m *ExecutionManager) launchSingleTaskExecution(
execInfo, err := workflowExecutor.Execute(ctx, workflowengineInterfaces.ExecutionData{
Namespace: namespace,
ExecutionID: workflowExecutionID,
- ReferenceWorkflowName: workflow.Id.Name,
- ReferenceLaunchPlanName: launchPlan.Id.Name,
- WorkflowClosure: workflow.Closure.CompiledWorkflow,
+ ReferenceWorkflowName: workflow.GetId().GetName(),
+ ReferenceLaunchPlanName: launchPlan.GetId().GetName(),
+ WorkflowClosure: workflow.GetClosure().GetCompiledWorkflow(),
WorkflowClosureReference: storage.DataReference(workflowModel.RemoteClosureIdentifier),
ExecutionParameters: executionParameters,
OffloadedInputsReference: inputsURI,
@@ -645,7 +645,7 @@ func (m *ExecutionManager) launchSingleTaskExecution(
if err != nil {
m.systemMetrics.PropellerFailures.Inc()
logger.Infof(ctx, "Failed to execute workflow %+v with execution id %+v and inputs %+v with err %v",
- request, &workflowExecutionID, request.Inputs, err)
+ request, &workflowExecutionID, request.GetInputs(), err)
return nil, nil, err
}
executionCreatedAt := time.Now()
@@ -655,13 +655,13 @@ func (m *ExecutionManager) launchSingleTaskExecution(
// Request notification settings takes precedence over the launch plan settings.
// If there is no notification in the request and DisableAll is not true, use the settings from the launch plan.
var notificationsSettings []*admin.Notification
- if launchPlan.Spec.GetEntityMetadata() != nil {
- notificationsSettings = launchPlan.Spec.EntityMetadata.GetNotifications()
+ if launchPlan.GetSpec().GetEntityMetadata() != nil {
+ notificationsSettings = launchPlan.GetSpec().GetEntityMetadata().GetNotifications()
}
- if request.Spec.GetNotifications() != nil && request.Spec.GetNotifications().Notifications != nil &&
- len(request.Spec.GetNotifications().Notifications) > 0 {
- notificationsSettings = request.Spec.GetNotifications().Notifications
- } else if request.Spec.GetDisableAll() {
+ if request.GetSpec().GetNotifications() != nil && request.GetSpec().GetNotifications().GetNotifications() != nil &&
+ len(request.GetSpec().GetNotifications().GetNotifications()) > 0 {
+ notificationsSettings = request.GetSpec().GetNotifications().GetNotifications()
+ } else if request.GetSpec().GetDisableAll() {
notificationsSettings = make([]*admin.Notification, 0)
}
@@ -673,14 +673,14 @@ func (m *ExecutionManager) launchSingleTaskExecution(
// The execution is not considered running until the propeller sends a specific event saying so.
CreatedAt: m._clock.Now(),
Notifications: notificationsSettings,
- WorkflowIdentifier: workflow.Id,
+ WorkflowIdentifier: workflow.GetId(),
ParentNodeExecutionID: parentNodeExecutionID,
SourceExecutionID: sourceExecutionID,
Cluster: execInfo.Cluster,
InputsURI: inputsURI,
UserInputsURI: userInputsURI,
- SecurityContext: executionConfig.SecurityContext,
- LaunchEntity: taskIdentifier.ResourceType,
+ SecurityContext: executionConfig.GetSecurityContext(),
+ LaunchEntity: taskIdentifier.GetResourceType(),
Namespace: namespace,
})
if err != nil {
@@ -688,27 +688,27 @@ func (m *ExecutionManager) launchSingleTaskExecution(
workflowExecutionID, err)
return nil, nil, err
}
- m.userMetrics.WorkflowExecutionInputBytes.Observe(float64(proto.Size(request.Inputs)))
+ m.userMetrics.WorkflowExecutionInputBytes.Observe(float64(proto.Size(request.GetInputs())))
return ctx, executionModel, nil
}
func resolveAuthRole(request *admin.ExecutionCreateRequest, launchPlan *admin.LaunchPlan) *admin.AuthRole {
- if request.Spec.AuthRole != nil {
- return request.Spec.AuthRole
+ if request.GetSpec().GetAuthRole() != nil {
+ return request.GetSpec().GetAuthRole()
}
- if launchPlan == nil || launchPlan.Spec == nil {
+ if launchPlan == nil || launchPlan.GetSpec() == nil {
return &admin.AuthRole{}
}
// Set role permissions based on launch plan Auth values.
// The branched-ness of this check is due to the presence numerous deprecated fields
- if launchPlan.Spec.GetAuthRole() != nil {
- return launchPlan.Spec.GetAuthRole()
+ if launchPlan.GetSpec().GetAuthRole() != nil {
+ return launchPlan.GetSpec().GetAuthRole()
} else if launchPlan.GetSpec().GetAuth() != nil {
return &admin.AuthRole{
- AssumableIamRole: launchPlan.GetSpec().GetAuth().AssumableIamRole,
- KubernetesServiceAccount: launchPlan.GetSpec().GetAuth().KubernetesServiceAccount,
+ AssumableIamRole: launchPlan.GetSpec().GetAuth().GetAssumableIamRole(),
+ KubernetesServiceAccount: launchPlan.GetSpec().GetAuth().GetKubernetesServiceAccount(),
}
} else if len(launchPlan.GetSpec().GetRole()) > 0 {
return &admin.AuthRole{
@@ -722,17 +722,17 @@ func resolveAuthRole(request *admin.ExecutionCreateRequest, launchPlan *admin.La
func resolveSecurityCtx(ctx context.Context, executionConfigSecurityCtx *core.SecurityContext,
resolvedAuthRole *admin.AuthRole) *core.SecurityContext {
// Use security context from the executionConfigSecurityCtx if its set and non empty or else resolve from authRole
- if executionConfigSecurityCtx != nil && executionConfigSecurityCtx.RunAs != nil &&
- (len(executionConfigSecurityCtx.RunAs.K8SServiceAccount) > 0 ||
- len(executionConfigSecurityCtx.RunAs.IamRole) > 0 ||
- len(executionConfigSecurityCtx.RunAs.ExecutionIdentity) > 0) {
+ if executionConfigSecurityCtx != nil && executionConfigSecurityCtx.GetRunAs() != nil &&
+ (len(executionConfigSecurityCtx.GetRunAs().GetK8SServiceAccount()) > 0 ||
+ len(executionConfigSecurityCtx.GetRunAs().GetIamRole()) > 0 ||
+ len(executionConfigSecurityCtx.GetRunAs().GetExecutionIdentity()) > 0) {
return executionConfigSecurityCtx
}
logger.Warn(ctx, "Setting security context from auth Role")
return &core.SecurityContext{
RunAs: &core.Identity{
- IamRole: resolvedAuthRole.AssumableIamRole,
- K8SServiceAccount: resolvedAuthRole.KubernetesServiceAccount,
+ IamRole: resolvedAuthRole.GetAssumableIamRole(),
+ K8SServiceAccount: resolvedAuthRole.GetKubernetesServiceAccount(),
},
}
}
@@ -755,7 +755,7 @@ func (m *ExecutionManager) getStringFromInput(ctx context.Context, inputBinding
case *core.Primitive_Integer:
strVal = p.GetStringValue()
case *core.Primitive_Datetime:
- t := time.Unix(p.GetDatetime().Seconds, int64(p.GetDatetime().Nanos))
+ t := time.Unix(p.GetDatetime().GetSeconds(), int64(p.GetDatetime().GetNanos()))
t = t.In(time.UTC)
strVal = t.Format("2006-01-02")
case *core.Primitive_StringValue:
@@ -812,7 +812,7 @@ func (m *ExecutionManager) fillInTemplateArgs(ctx context.Context, query *core.A
var partitions map[string]*core.LabelValue
if artifactID.GetPartitions().GetValue() != nil {
- partitions = make(map[string]*core.LabelValue, len(artifactID.GetPartitions().Value))
+ partitions = make(map[string]*core.LabelValue, len(artifactID.GetPartitions().GetValue()))
for k, v := range artifactID.GetPartitions().GetValue() {
newValue, err := m.getLabelValue(ctx, v, inputs)
if err != nil {
@@ -825,20 +825,20 @@ func (m *ExecutionManager) fillInTemplateArgs(ctx context.Context, query *core.A
var timePartition *core.TimePartition
if artifactID.GetTimePartition().GetValue() != nil {
- if artifactID.GetTimePartition().Value.GetTimeValue() != nil {
+ if artifactID.GetTimePartition().GetValue().GetTimeValue() != nil {
// If the time value is set, then just pass it through, nothing to fill in.
timePartition = artifactID.GetTimePartition()
- } else if artifactID.GetTimePartition().Value.GetInputBinding() != nil {
+ } else if artifactID.GetTimePartition().GetValue().GetInputBinding() != nil {
// Evaluate the time partition input binding
- lit, ok := inputs[artifactID.GetTimePartition().Value.GetInputBinding().GetVar()]
+ lit, ok := inputs[artifactID.GetTimePartition().GetValue().GetInputBinding().GetVar()]
if !ok {
- return query, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "time partition input binding var [%s] not found in inputs %v", artifactID.GetTimePartition().Value.GetInputBinding().GetVar(), inputs)
+ return query, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "time partition input binding var [%s] not found in inputs %v", artifactID.GetTimePartition().GetValue().GetInputBinding().GetVar(), inputs)
}
if lit.GetScalar().GetPrimitive().GetDatetime() == nil {
return query, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"time partition binding to input var [%s] failing because %v is not a datetime",
- artifactID.GetTimePartition().Value.GetInputBinding().GetVar(), lit)
+ artifactID.GetTimePartition().GetValue().GetInputBinding().GetVar(), lit)
}
timePartition = &core.TimePartition{
Value: &core.LabelValue{
@@ -881,8 +881,8 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel(
return nil, nil, nil, err
}
- if request.Spec.LaunchPlan.ResourceType == core.ResourceType_TASK {
- logger.Debugf(ctx, "Launching single task execution with [%+v]", request.Spec.LaunchPlan)
+ if request.GetSpec().GetLaunchPlan().GetResourceType() == core.ResourceType_TASK {
+ logger.Debugf(ctx, "Launching single task execution with [%+v]", request.GetSpec().GetLaunchPlan())
// When tasks can have defaults this will need to handle Artifacts as well.
ctx, model, err := m.launchSingleTaskExecution(ctx, request, requestedAt)
return ctx, model, nil, err
@@ -892,7 +892,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel(
func (m *ExecutionManager) launchExecution(
ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) (context.Context, *models.Execution, []*models.ExecutionTag, error) {
- launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Spec.LaunchPlan)
+ launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.GetSpec().GetLaunchPlan())
if err != nil {
logger.Debugf(ctx, "Failed to get launch plan model for ExecutionCreateRequest %+v with err %v", request, err)
return nil, nil, nil, err
@@ -905,38 +905,38 @@ func (m *ExecutionManager) launchExecution(
var lpExpectedInputs *core.ParameterMap
var usedArtifactIDs []*core.ArtifactID
- lpExpectedInputs = launchPlan.Closure.ExpectedInputs
+ lpExpectedInputs = launchPlan.GetClosure().GetExpectedInputs()
// Artifacts retrieved will need to be stored somewhere to ensure that we can re-emit events if necessary
// in the future, and also to make sure that relaunch and recover can use it if necessary.
executionInputs, err := validation.CheckAndFetchInputsForExecution(
- request.Inputs,
- launchPlan.Spec.FixedInputs,
+ request.GetInputs(),
+ launchPlan.GetSpec().GetFixedInputs(),
lpExpectedInputs,
)
if err != nil {
logger.Debugf(ctx, "Failed to CheckAndFetchInputsForExecution with request.Inputs: %+v"+
"fixed inputs: %+v and expected inputs: %+v with err %v",
- request.Inputs, launchPlan.Spec.FixedInputs, lpExpectedInputs, err)
+ request.GetInputs(), launchPlan.GetSpec().GetFixedInputs(), lpExpectedInputs, err)
return nil, nil, nil, err
}
- workflowModel, err := util.GetWorkflowModel(ctx, m.db, launchPlan.Spec.WorkflowId)
+ workflowModel, err := util.GetWorkflowModel(ctx, m.db, launchPlan.GetSpec().GetWorkflowId())
if err != nil {
- logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err)
+ logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.GetSpec().GetWorkflowId(), err)
return nil, nil, nil, err
}
workflow, err := transformers.FromWorkflowModel(workflowModel)
if err != nil {
- logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err)
+ logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.GetSpec().GetWorkflowId(), err)
return nil, nil, nil, err
}
name := util.GetExecutionName(request)
workflowExecutionID := &core.WorkflowExecutionIdentifier{
- Project: request.Project,
- Domain: request.Domain,
+ Project: request.GetProject(),
+ Domain: request.GetDomain(),
Name: name,
}
@@ -947,7 +947,7 @@ func (m *ExecutionManager) launchExecution(
var err error
closure, err = util.FetchAndGetWorkflowClosure(groupCtx, m.storageClient, workflowModel.RemoteClosureIdentifier)
if err != nil {
- logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err)
+ logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.GetSpec().GetWorkflowId(), err)
}
return err
})
@@ -956,15 +956,15 @@ func (m *ExecutionManager) launchExecution(
group.Go(func() error {
var err error
inputsURI, err = common.OffloadLiteralMap(groupCtx, m.storageClient, executionInputs,
- workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs)
+ workflowExecutionID.GetProject(), workflowExecutionID.GetDomain(), workflowExecutionID.GetName(), shared.Inputs)
return err
})
var userInputsURI storage.DataReference
group.Go(func() error {
var err error
- userInputsURI, err = common.OffloadLiteralMap(groupCtx, m.storageClient, request.Inputs,
- workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.UserInputs)
+ userInputsURI, err = common.OffloadLiteralMap(groupCtx, m.storageClient, request.GetInputs(),
+ workflowExecutionID.GetProject(), workflowExecutionID.GetDomain(), workflowExecutionID.GetName(), shared.UserInputs)
return err
})
@@ -972,12 +972,12 @@ func (m *ExecutionManager) launchExecution(
if err != nil {
return nil, nil, nil, err
}
- closure.CreatedAt = workflow.Closure.CreatedAt
+ closure.CreatedAt = workflow.GetClosure().GetCreatedAt()
workflow.Closure = closure
ctx = getExecutionContext(ctx, workflowExecutionID)
- var requestSpec = request.Spec
- if requestSpec.Metadata == nil {
+ var requestSpec = request.GetSpec()
+ if requestSpec.GetMetadata() == nil {
requestSpec.Metadata = &admin.ExecutionMetadata{}
}
requestSpec.Metadata.Principal = getUser(ctx)
@@ -992,13 +992,13 @@ func (m *ExecutionManager) launchExecution(
}
// Dynamically assign task resource defaults.
- platformTaskResources := util.GetTaskResources(ctx, workflow.Id, m.resourceManager, m.config.TaskResourceConfiguration())
- for _, task := range workflow.Closure.CompiledWorkflow.Tasks {
+ platformTaskResources := util.GetTaskResources(ctx, workflow.GetId(), m.resourceManager, m.config.TaskResourceConfiguration())
+ for _, task := range workflow.GetClosure().GetCompiledWorkflow().GetTasks() {
m.setCompiledTaskDefaults(ctx, task, platformTaskResources)
}
// Dynamically assign execution queues.
- m.populateExecutionQueue(ctx, workflow.Id, workflow.Closure.CompiledWorkflow)
+ m.populateExecutionQueue(ctx, workflow.GetId(), workflow.GetClosure().GetCompiledWorkflow())
executionConfig, err := m.getExecutionConfig(ctx, request, launchPlan)
if err != nil {
@@ -1006,23 +1006,23 @@ func (m *ExecutionManager) launchExecution(
}
namespace := common.GetNamespaceName(
- m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.Project, workflowExecutionID.Domain)
+ m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.GetProject(), workflowExecutionID.GetDomain())
- labels, err := resolveStringMap(executionConfig.GetLabels(), launchPlan.Spec.Labels, "labels", m.config.RegistrationValidationConfiguration().GetMaxLabelEntries())
+ labels, err := resolveStringMap(executionConfig.GetLabels(), launchPlan.GetSpec().GetLabels(), "labels", m.config.RegistrationValidationConfiguration().GetMaxLabelEntries())
if err != nil {
return nil, nil, nil, err
}
- labels, err = m.addProjectLabels(ctx, request.Project, labels)
+ labels, err = m.addProjectLabels(ctx, request.GetProject(), labels)
if err != nil {
return nil, nil, nil, err
}
- annotations, err := resolveStringMap(executionConfig.GetAnnotations(), launchPlan.Spec.Annotations, "annotations", m.config.RegistrationValidationConfiguration().GetMaxAnnotationEntries())
+ annotations, err := resolveStringMap(executionConfig.GetAnnotations(), launchPlan.GetSpec().GetAnnotations(), "annotations", m.config.RegistrationValidationConfiguration().GetMaxAnnotationEntries())
if err != nil {
return nil, nil, nil, err
}
var rawOutputDataConfig *admin.RawOutputDataConfig
- if executionConfig.RawOutputDataConfig != nil {
- rawOutputDataConfig = executionConfig.RawOutputDataConfig
+ if executionConfig.GetRawOutputDataConfig() != nil {
+ rawOutputDataConfig = executionConfig.GetRawOutputDataConfig()
}
clusterAssignment, err := m.getClusterAssignment(ctx, request)
@@ -1031,8 +1031,8 @@ func (m *ExecutionManager) launchExecution(
}
var executionClusterLabel *admin.ExecutionClusterLabel
- if requestSpec.ExecutionClusterLabel != nil {
- executionClusterLabel = requestSpec.ExecutionClusterLabel
+ if requestSpec.GetExecutionClusterLabel() != nil {
+ executionClusterLabel = requestSpec.GetExecutionClusterLabel()
}
executionParameters := workflowengineInterfaces.ExecutionParameters{
@@ -1049,7 +1049,7 @@ func (m *ExecutionManager) launchExecution(
ExecutionClusterLabel: executionClusterLabel,
}
- overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, launchPlan.GetSpec().WorkflowId.Name, launchPlan.Id.Name)
+ overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, launchPlan.GetSpec().GetWorkflowId().GetName(), launchPlan.GetId().GetName())
if err != nil {
return nil, nil, nil, err
}
@@ -1057,9 +1057,9 @@ func (m *ExecutionManager) launchExecution(
executionParameters.TaskPluginOverrides = overrides
}
- if request.Spec.Metadata != nil && request.Spec.Metadata.ReferenceExecution != nil &&
- request.Spec.Metadata.Mode == admin.ExecutionMetadata_RECOVERED {
- executionParameters.RecoveryExecution = request.Spec.Metadata.ReferenceExecution
+ if request.GetSpec().GetMetadata() != nil && request.GetSpec().GetMetadata().GetReferenceExecution() != nil &&
+ request.GetSpec().GetMetadata().GetMode() == admin.ExecutionMetadata_RECOVERED {
+ executionParameters.RecoveryExecution = request.GetSpec().GetMetadata().GetReferenceExecution()
}
executionCreatedAt := time.Now()
@@ -1068,12 +1068,12 @@ func (m *ExecutionManager) launchExecution(
// Request notification settings takes precedence over the launch plan settings.
// If there is no notification in the request and DisableAll is not true, use the settings from the launch plan.
var notificationsSettings []*admin.Notification
- if launchPlan.Spec.GetEntityMetadata() != nil {
- notificationsSettings = launchPlan.Spec.EntityMetadata.GetNotifications()
+ if launchPlan.GetSpec().GetEntityMetadata() != nil {
+ notificationsSettings = launchPlan.GetSpec().GetEntityMetadata().GetNotifications()
}
- if requestSpec.GetNotifications() != nil && requestSpec.GetNotifications().Notifications != nil &&
- len(requestSpec.GetNotifications().Notifications) > 0 {
- notificationsSettings = requestSpec.GetNotifications().Notifications
+ if requestSpec.GetNotifications() != nil && requestSpec.GetNotifications().GetNotifications() != nil &&
+ len(requestSpec.GetNotifications().GetNotifications()) > 0 {
+ notificationsSettings = requestSpec.GetNotifications().GetNotifications()
} else if requestSpec.GetDisableAll() {
notificationsSettings = make([]*admin.Notification, 0)
}
@@ -1086,13 +1086,13 @@ func (m *ExecutionManager) launchExecution(
// The execution is not considered running until the propeller sends a specific event saying so.
CreatedAt: m._clock.Now(),
Notifications: notificationsSettings,
- WorkflowIdentifier: workflow.Id,
+ WorkflowIdentifier: workflow.GetId(),
ParentNodeExecutionID: parentNodeExecutionID,
SourceExecutionID: sourceExecutionID,
InputsURI: inputsURI,
UserInputsURI: userInputsURI,
- SecurityContext: executionConfig.SecurityContext,
- LaunchEntity: launchPlan.Id.ResourceType,
+ SecurityContext: executionConfig.GetSecurityContext(),
+ LaunchEntity: launchPlan.GetId().GetResourceType(),
Namespace: namespace,
}
@@ -1100,9 +1100,9 @@ func (m *ExecutionManager) launchExecution(
execInfo, execErr := workflowExecutor.Execute(ctx, workflowengineInterfaces.ExecutionData{
Namespace: namespace,
ExecutionID: workflowExecutionID,
- ReferenceWorkflowName: workflow.Id.Name,
- ReferenceLaunchPlanName: launchPlan.Id.Name,
- WorkflowClosure: workflow.Closure.CompiledWorkflow,
+ ReferenceWorkflowName: workflow.GetId().GetName(),
+ ReferenceLaunchPlanName: launchPlan.GetId().GetName(),
+ WorkflowClosure: workflow.GetClosure().GetCompiledWorkflow(),
WorkflowClosureReference: storage.DataReference(workflowModel.RemoteClosureIdentifier),
ExecutionParameters: executionParameters,
OffloadedInputsReference: inputsURI,
@@ -1160,7 +1160,7 @@ func (m *ExecutionManager) CreateExecution(
*admin.ExecutionCreateResponse, error) {
// Prior to flyteidl v0.15.0, Inputs was held in ExecutionSpec. Ensure older clients continue to work.
- if request.Inputs == nil || len(request.Inputs.Literals) == 0 {
+ if request.GetInputs() == nil || len(request.GetInputs().GetLiterals()) == 0 {
request.Inputs = request.GetSpec().GetInputs()
}
var executionModel *models.Execution
@@ -1182,7 +1182,7 @@ func (m *ExecutionManager) CreateExecution(
func (m *ExecutionManager) RelaunchExecution(
ctx context.Context, request *admin.ExecutionRelaunchRequest, requestedAt time.Time) (
*admin.ExecutionCreateResponse, error) {
- existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.Id)
+ existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId())
if err != nil {
logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err %v", request, err)
return nil, err
@@ -1192,8 +1192,8 @@ func (m *ExecutionManager) RelaunchExecution(
return nil, err
}
- executionSpec := existingExecution.Spec
- if executionSpec.Metadata == nil {
+ executionSpec := existingExecution.GetSpec()
+ if executionSpec.GetMetadata() == nil {
executionSpec.Metadata = &admin.ExecutionMetadata{}
}
var inputs *core.LiteralMap
@@ -1209,17 +1209,17 @@ func (m *ExecutionManager) RelaunchExecution(
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal spec")
}
- inputs = spec.Inputs
+ inputs = spec.GetInputs()
}
executionSpec.Metadata.Mode = admin.ExecutionMetadata_RELAUNCH
- executionSpec.Metadata.ReferenceExecution = existingExecution.Id
+ executionSpec.Metadata.ReferenceExecution = existingExecution.GetId()
executionSpec.OverwriteCache = request.GetOverwriteCache()
var executionModel *models.Execution
var executionTagModel []*models.ExecutionTag
ctx, executionModel, executionTagModel, err = m.launchExecutionAndPrepareModel(ctx, &admin.ExecutionCreateRequest{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Name,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetName(),
Spec: executionSpec,
Inputs: inputs,
}, requestedAt)
@@ -1231,7 +1231,7 @@ func (m *ExecutionManager) RelaunchExecution(
if err != nil {
return nil, err
}
- logger.Debugf(ctx, "Successfully relaunched [%+v] as [%+v]", request.Id, workflowExecutionIdentifier)
+ logger.Debugf(ctx, "Successfully relaunched [%+v] as [%+v]", request.GetId(), workflowExecutionIdentifier)
return &admin.ExecutionCreateResponse{
Id: workflowExecutionIdentifier,
}, nil
@@ -1240,7 +1240,7 @@ func (m *ExecutionManager) RelaunchExecution(
func (m *ExecutionManager) RecoverExecution(
ctx context.Context, request *admin.ExecutionRecoverRequest, requestedAt time.Time) (
*admin.ExecutionCreateResponse, error) {
- existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.Id)
+ existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId())
if err != nil {
logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err %v", request, err)
return nil, err
@@ -1250,8 +1250,8 @@ func (m *ExecutionManager) RecoverExecution(
return nil, err
}
- executionSpec := existingExecution.Spec
- if executionSpec.Metadata == nil {
+ executionSpec := existingExecution.GetSpec()
+ if executionSpec.GetMetadata() == nil {
executionSpec.Metadata = &admin.ExecutionMetadata{}
}
var inputs *core.LiteralMap
@@ -1261,17 +1261,17 @@ func (m *ExecutionManager) RecoverExecution(
return nil, err
}
}
- if request.Metadata != nil {
- executionSpec.Metadata.ParentNodeExecution = request.Metadata.ParentNodeExecution
+ if request.GetMetadata() != nil {
+ executionSpec.Metadata.ParentNodeExecution = request.GetMetadata().GetParentNodeExecution()
}
executionSpec.Metadata.Mode = admin.ExecutionMetadata_RECOVERED
- executionSpec.Metadata.ReferenceExecution = existingExecution.Id
+ executionSpec.Metadata.ReferenceExecution = existingExecution.GetId()
var executionModel *models.Execution
var executionTagModel []*models.ExecutionTag
ctx, executionModel, executionTagModel, err = m.launchExecutionAndPrepareModel(ctx, &admin.ExecutionCreateRequest{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Name,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetName(),
Spec: executionSpec,
Inputs: inputs,
}, requestedAt)
@@ -1283,7 +1283,7 @@ func (m *ExecutionManager) RecoverExecution(
if err != nil {
return nil, err
}
- logger.Infof(ctx, "Successfully recovered [%+v] as [%+v]", request.Id, workflowExecutionIdentifier)
+ logger.Infof(ctx, "Successfully recovered [%+v] as [%+v]", request.GetId(), workflowExecutionIdentifier)
return &admin.ExecutionCreateResponse{
Id: workflowExecutionIdentifier,
}, nil
@@ -1304,20 +1304,20 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics(
"[%s/%s/%s]", executionModel.Project, executionModel.Domain, executionModel.Name)
return
}
- launchPlan, err := util.GetLaunchPlan(context.Background(), m.db, execution.Spec.LaunchPlan)
+ launchPlan, err := util.GetLaunchPlan(context.Background(), m.db, execution.GetSpec().GetLaunchPlan())
if err != nil {
logger.Warningf(context.Background(),
"failed to find launch plan when emitting scheduled workflow execution stats with for "+
- "execution: [%+v] and launch plan [%+v]", execution.Id, execution.Spec.LaunchPlan)
+ "execution: [%+v] and launch plan [%+v]", execution.GetId(), execution.GetSpec().GetLaunchPlan())
return
}
- if launchPlan.Spec.EntityMetadata == nil ||
- launchPlan.Spec.EntityMetadata.Schedule == nil ||
- launchPlan.Spec.EntityMetadata.Schedule.KickoffTimeInputArg == "" {
+ if launchPlan.GetSpec().GetEntityMetadata() == nil ||
+ launchPlan.GetSpec().GetEntityMetadata().GetSchedule() == nil ||
+ launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetKickoffTimeInputArg() == "" {
// Kickoff time arguments aren't always required for scheduled workflows.
logger.Debugf(context.Background(), "no kickoff time to report for scheduled workflow execution [%+v]",
- execution.Id)
+ execution.GetId())
return
}
@@ -1327,13 +1327,13 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics(
logger.Errorf(ctx, "Failed to find inputs for emitting schedule delay event from uri: [%v]", executionModel.InputsURI)
return
}
- scheduledKickoffTimeProto := inputs.Literals[launchPlan.Spec.EntityMetadata.Schedule.KickoffTimeInputArg]
+ scheduledKickoffTimeProto := inputs.GetLiterals()[launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetKickoffTimeInputArg()]
if scheduledKickoffTimeProto == nil || scheduledKickoffTimeProto.GetScalar() == nil ||
scheduledKickoffTimeProto.GetScalar().GetPrimitive() == nil ||
scheduledKickoffTimeProto.GetScalar().GetPrimitive().GetDatetime() == nil {
logger.Warningf(context.Background(),
"failed to find scheduled kickoff time datetime value for scheduled workflow execution [%+v] "+
- "although one was expected", execution.Id)
+ "although one was expected", execution.GetId())
return
}
scheduledKickoffTime, err := ptypes.Timestamp(scheduledKickoffTimeProto.GetScalar().GetPrimitive().GetDatetime())
@@ -1347,16 +1347,16 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics(
return
}
- domainCounterMap, ok := m.userMetrics.ScheduledExecutionDelays[execution.Id.Project]
+ domainCounterMap, ok := m.userMetrics.ScheduledExecutionDelays[execution.GetId().GetProject()]
if !ok {
domainCounterMap = make(map[string]*promutils.StopWatch)
- m.userMetrics.ScheduledExecutionDelays[execution.Id.Project] = domainCounterMap
+ m.userMetrics.ScheduledExecutionDelays[execution.GetId().GetProject()] = domainCounterMap
}
var watch *promutils.StopWatch
- watch, ok = domainCounterMap[execution.Id.Domain]
+ watch, ok = domainCounterMap[execution.GetId().GetDomain()]
if !ok {
- newWatch, err := m.systemMetrics.Scope.NewSubScope(execution.Id.Project).NewSubScope(execution.Id.Domain).NewStopWatch(
+ newWatch, err := m.systemMetrics.Scope.NewSubScope(execution.GetId().GetProject()).NewSubScope(execution.GetId().GetDomain()).NewStopWatch(
"scheduled_execution_delay",
"delay between scheduled execution time and time execution was observed running",
time.Nanosecond)
@@ -1367,7 +1367,7 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics(
return
}
watch = &newWatch
- domainCounterMap[execution.Id.Domain] = watch
+ domainCounterMap[execution.GetId().GetDomain()] = watch
}
watch.Observe(scheduledKickoffTime, runningEventTime)
}
@@ -1421,30 +1421,30 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm
*admin.WorkflowExecutionEventResponse, error) {
err := validation.ValidateCreateWorkflowEventRequest(request, m.config.ApplicationConfiguration().GetRemoteDataConfig().MaxSizeInBytes)
if err != nil {
- logger.Debugf(ctx, "received invalid CreateWorkflowEventRequest [%s]: %v", request.RequestId, err)
+ logger.Debugf(ctx, "received invalid CreateWorkflowEventRequest [%s]: %v", request.GetRequestId(), err)
return nil, err
}
- ctx = getExecutionContext(ctx, request.Event.ExecutionId)
+ ctx = getExecutionContext(ctx, request.GetEvent().GetExecutionId())
logger.Debugf(ctx, "Received workflow execution event for [%+v] transitioning to phase [%v]",
- request.Event.ExecutionId, request.Event.Phase)
+ request.GetEvent().GetExecutionId(), request.GetEvent().GetPhase())
- executionModel, err := util.GetExecutionModel(ctx, m.db, request.Event.ExecutionId)
+ executionModel, err := util.GetExecutionModel(ctx, m.db, request.GetEvent().GetExecutionId())
if err != nil {
logger.Debugf(ctx, "failed to find execution [%+v] for recorded event [%s]: %v",
- request.Event.ExecutionId, request.RequestId, err)
+ request.GetEvent().GetExecutionId(), request.GetRequestId(), err)
return nil, err
}
wfExecPhase := core.WorkflowExecution_Phase(core.WorkflowExecution_Phase_value[executionModel.Phase])
// Subsequent queued events announcing a cluster reassignment are permitted.
- if request.Event.Phase != core.WorkflowExecution_QUEUED {
- if wfExecPhase == request.Event.Phase {
+ if request.GetEvent().GetPhase() != core.WorkflowExecution_QUEUED {
+ if wfExecPhase == request.GetEvent().GetPhase() {
logger.Debugf(ctx, "This phase %s was already recorded for workflow execution %v",
- wfExecPhase.String(), request.Event.ExecutionId)
+ wfExecPhase.String(), request.GetEvent().GetExecutionId())
return nil, errors.NewFlyteAdminErrorf(codes.AlreadyExists,
"This phase %s was already recorded for workflow execution %v",
- wfExecPhase.String(), request.Event.ExecutionId)
- } else if err := validation.ValidateCluster(ctx, executionModel.Cluster, request.Event.ProducerId); err != nil {
+ wfExecPhase.String(), request.GetEvent().GetExecutionId())
+ } else if err := validation.ValidateCluster(ctx, executionModel.Cluster, request.GetEvent().GetProducerId()); err != nil {
// Only perform event cluster validation **after** an execution has moved on from QUEUED.
return nil, err
}
@@ -1453,22 +1453,22 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm
if common.IsExecutionTerminal(wfExecPhase) {
// Cannot go backwards in time from a terminal state to anything else
curPhase := wfExecPhase.String()
- errorMsg := fmt.Sprintf("Invalid phase change from %s to %s for workflow execution %v", curPhase, request.Event.Phase.String(), request.Event.ExecutionId)
+ errorMsg := fmt.Sprintf("Invalid phase change from %s to %s for workflow execution %v", curPhase, request.GetEvent().GetPhase().String(), request.GetEvent().GetExecutionId())
return nil, errors.NewAlreadyInTerminalStateError(ctx, errorMsg, curPhase)
- } else if wfExecPhase == core.WorkflowExecution_RUNNING && request.Event.Phase == core.WorkflowExecution_QUEUED {
+ } else if wfExecPhase == core.WorkflowExecution_RUNNING && request.GetEvent().GetPhase() == core.WorkflowExecution_QUEUED {
// Cannot go back in time from RUNNING -> QUEUED
return nil, errors.NewFlyteAdminErrorf(codes.FailedPrecondition,
"Cannot go from %s to %s for workflow execution %v",
- wfExecPhase.String(), request.Event.Phase.String(), request.Event.ExecutionId)
- } else if wfExecPhase == core.WorkflowExecution_ABORTING && !common.IsExecutionTerminal(request.Event.Phase) {
+ wfExecPhase.String(), request.GetEvent().GetPhase().String(), request.GetEvent().GetExecutionId())
+ } else if wfExecPhase == core.WorkflowExecution_ABORTING && !common.IsExecutionTerminal(request.GetEvent().GetPhase()) {
return nil, errors.NewFlyteAdminErrorf(codes.FailedPrecondition,
- "Invalid phase change from aborting to %s for workflow execution %v", request.Event.Phase.String(), request.Event.ExecutionId)
+ "Invalid phase change from aborting to %s for workflow execution %v", request.GetEvent().GetPhase().String(), request.GetEvent().GetExecutionId())
}
err = transformers.UpdateExecutionModelState(ctx, executionModel, request, m.config.ApplicationConfiguration().GetRemoteDataConfig().InlineEventDataPolicy, m.storageClient)
if err != nil {
logger.Debugf(ctx, "failed to transform updated workflow execution model [%+v] after receiving event with err: %v",
- request.Event.ExecutionId, err)
+ request.GetEvent().GetExecutionId(), err)
return nil, err
}
err = m.db.ExecutionRepo().Update(ctx, *executionModel)
@@ -1479,28 +1479,28 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm
}
m.dbEventWriter.Write(request)
- if request.Event.Phase == core.WorkflowExecution_RUNNING {
+ if request.GetEvent().GetPhase() == core.WorkflowExecution_RUNNING {
// Workflow executions are created in state "UNDEFINED". All the time up until a RUNNING event is received is
// considered system-induced delay.
if executionModel.Mode == int32(admin.ExecutionMetadata_SCHEDULED) {
- go m.emitScheduledWorkflowMetrics(ctx, executionModel, request.Event.OccurredAt)
+ go m.emitScheduledWorkflowMetrics(ctx, executionModel, request.GetEvent().GetOccurredAt())
}
- } else if common.IsExecutionTerminal(request.Event.Phase) {
- if request.Event.Phase == core.WorkflowExecution_FAILED {
+ } else if common.IsExecutionTerminal(request.GetEvent().GetPhase()) {
+ if request.GetEvent().GetPhase() == core.WorkflowExecution_FAILED {
// request.Event is expected to be of type WorkflowExecutionEvent_Error when workflow fails.
// if not, log the error and continue
- if err := request.Event.GetError(); err != nil {
- ctx = context.WithValue(ctx, common.ErrorKindKey, err.Kind.String())
+ if err := request.GetEvent().GetError(); err != nil {
+ ctx = context.WithValue(ctx, common.ErrorKindKey, err.GetKind().String())
} else {
logger.Warning(ctx, "Failed to parse error for FAILED request [%+v]", request)
}
}
m.systemMetrics.ActiveExecutions.Dec()
- m.systemMetrics.ExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.Event.Phase.String()))
- go m.emitOverallWorkflowExecutionTime(executionModel, request.Event.OccurredAt)
- if request.Event.GetOutputData() != nil {
- m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(proto.Size(request.Event.GetOutputData())))
+ m.systemMetrics.ExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.GetEvent().GetPhase().String()))
+ go m.emitOverallWorkflowExecutionTime(executionModel, request.GetEvent().GetOccurredAt())
+ if request.GetEvent().GetOutputData() != nil {
+ m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(proto.Size(request.GetEvent().GetOutputData())))
}
err = m.publishNotifications(ctx, request, *executionModel)
@@ -1515,14 +1515,14 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm
if err := m.eventPublisher.Publish(ctx, proto.MessageName(request), request); err != nil {
m.systemMetrics.PublishEventError.Inc()
- logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.RequestId, err)
+ logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.GetRequestId(), err)
}
go func() {
ceCtx := context.TODO()
if err := m.cloudEventPublisher.Publish(ceCtx, proto.MessageName(request), request); err != nil {
m.systemMetrics.PublishEventError.Inc()
- logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.RequestId, err)
+ logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.GetRequestId(), err)
}
}()
@@ -1531,12 +1531,12 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *adm
func (m *ExecutionManager) GetExecution(
ctx context.Context, request *admin.WorkflowExecutionGetRequest) (*admin.Execution, error) {
- if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil {
+ if err := validation.ValidateWorkflowExecutionIdentifier(request.GetId()); err != nil {
logger.Debugf(ctx, "GetExecution request [%+v] failed validation with err: %v", request, err)
return nil, err
}
- ctx = getExecutionContext(ctx, request.Id)
- executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id)
+ ctx = getExecutionContext(ctx, request.GetId())
+ executionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId())
if err != nil {
logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err)
return nil, err
@@ -1547,7 +1547,7 @@ func (m *ExecutionManager) GetExecution(
DefaultNamespace: namespace,
})
if transformerErr != nil {
- logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.Id,
+ logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.GetId(),
transformerErr)
return nil, transformerErr
}
@@ -1557,18 +1557,18 @@ func (m *ExecutionManager) GetExecution(
func (m *ExecutionManager) UpdateExecution(ctx context.Context, request *admin.ExecutionUpdateRequest,
requestedAt time.Time) (*admin.ExecutionUpdateResponse, error) {
- if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil {
+ if err := validation.ValidateWorkflowExecutionIdentifier(request.GetId()); err != nil {
logger.Debugf(ctx, "UpdateExecution request [%+v] failed validation with err: %v", request, err)
return nil, err
}
- ctx = getExecutionContext(ctx, request.Id)
- executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id)
+ ctx = getExecutionContext(ctx, request.GetId())
+ executionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId())
if err != nil {
logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err)
return nil, err
}
- if err = transformers.UpdateExecutionModelStateChangeDetails(executionModel, request.State, requestedAt,
+ if err = transformers.UpdateExecutionModelStateChangeDetails(executionModel, request.GetState(), requestedAt,
getUser(ctx)); err != nil {
return nil, err
}
@@ -1582,15 +1582,15 @@ func (m *ExecutionManager) UpdateExecution(ctx context.Context, request *admin.E
func (m *ExecutionManager) GetExecutionData(
ctx context.Context, request *admin.WorkflowExecutionGetDataRequest) (*admin.WorkflowExecutionGetDataResponse, error) {
- ctx = getExecutionContext(ctx, request.Id)
- executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id)
+ ctx = getExecutionContext(ctx, request.GetId())
+ executionModel, err := util.GetExecutionModel(ctx, m.db, request.GetId())
if err != nil {
logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err)
return nil, err
}
execution, err := transformers.FromExecutionModel(ctx, *executionModel, transformers.DefaultExecutionTransformerOptions)
if err != nil {
- logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.GetId(), err)
return nil, err
}
// Prior to flyteidl v0.15.0, Inputs were held in ExecutionClosure and were not offloaded. Ensure we can return the inputs as expected.
@@ -1600,7 +1600,7 @@ func (m *ExecutionManager) GetExecutionData(
if err := proto.Unmarshal(executionModel.Closure, closure); err != nil {
return nil, err
}
- newInputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, closure.ComputedInputs, request.Id.Project, request.Id.Domain, request.Id.Name, shared.Inputs)
+ newInputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, closure.GetComputedInputs(), request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName(), shared.Inputs)
if err != nil {
return nil, err
}
@@ -1626,7 +1626,7 @@ func (m *ExecutionManager) GetExecutionData(
group.Go(func() error {
var err error
outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(),
- m.storageClient, util.ToExecutionClosureInterface(execution.Closure))
+ m.storageClient, util.ToExecutionClosureInterface(execution.GetClosure()))
return err
})
@@ -1642,11 +1642,11 @@ func (m *ExecutionManager) GetExecutionData(
FullOutputs: outputs,
}
- m.userMetrics.WorkflowExecutionInputBytes.Observe(float64(response.Inputs.Bytes))
- if response.Outputs.Bytes > 0 {
- m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(response.Outputs.Bytes))
- } else if response.FullOutputs != nil {
- m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(proto.Size(response.FullOutputs)))
+ m.userMetrics.WorkflowExecutionInputBytes.Observe(float64(response.GetInputs().GetBytes()))
+ if response.GetOutputs().GetBytes() > 0 {
+ m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(response.GetOutputs().GetBytes()))
+ } else if response.GetFullOutputs() != nil {
+ m.userMetrics.WorkflowExecutionOutputBytes.Observe(float64(proto.Size(response.GetFullOutputs())))
}
return response, nil
}
@@ -1658,26 +1658,26 @@ func (m *ExecutionManager) ListExecutions(
logger.Debugf(ctx, "ListExecutions request [%+v] failed validation with err: %v", request, err)
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain())
filters, err := util.GetDbFilters(util.FilterSpec{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name, // Optional, may be empty.
- RequestFilters: request.Filters,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(), // Optional, may be empty.
+ RequestFilters: request.GetFilters(),
}, common.Execution)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.ExecutionColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.ExecutionColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s for ListExecutions",
- request.Token)
+ request.GetToken())
}
joinTableEntities := make(map[common.Entity]bool)
for _, filter := range filters {
@@ -1690,7 +1690,7 @@ func (m *ExecutionManager) ListExecutions(
}
listExecutionsInput := repositoryInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
@@ -1717,7 +1717,7 @@ func (m *ExecutionManager) ListExecutions(
// END TO BE DELETED
var token string
- if len(executionList) == int(request.Limit) {
+ if len(executionList) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(executionList))
}
return &admin.ExecutionList{
@@ -1736,16 +1736,16 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request *ad
if err != nil {
// This shouldn't happen because execution manager marshaled the data into models.Execution.
m.systemMetrics.TransformerError.Inc()
- return errors.NewFlyteAdminErrorf(codes.Internal, "Failed to transform execution [%+v] with err: %v", request.Event.ExecutionId, err)
+ return errors.NewFlyteAdminErrorf(codes.Internal, "Failed to transform execution [%+v] with err: %v", request.GetEvent().GetExecutionId(), err)
}
- var notificationsList = adminExecution.Closure.Notifications
+ var notificationsList = adminExecution.GetClosure().GetNotifications()
logger.Debugf(ctx, "publishing notifications for execution [%+v] in state [%+v] for notifications [%+v]",
- request.Event.ExecutionId, request.Event.Phase, notificationsList)
+ request.GetEvent().GetExecutionId(), request.GetEvent().GetPhase(), notificationsList)
for _, notification := range notificationsList {
// Check if the notification phase matches the current one.
var matchPhase = false
- for _, phase := range notification.Phases {
- if phase == request.Event.Phase {
+ for _, phase := range notification.GetPhases() {
+ if phase == request.GetEvent().GetPhase() {
matchPhase = true
}
}
@@ -1765,11 +1765,11 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request *ad
} else if notification.GetSlack() != nil {
emailNotification.RecipientsEmail = notification.GetSlack().GetRecipientsEmail()
} else {
- logger.Debugf(ctx, "failed to publish notification, encountered unrecognized type: %v", notification.Type)
+ logger.Debugf(ctx, "failed to publish notification, encountered unrecognized type: %v", notification.GetType())
m.systemMetrics.UnexpectedDataError.Inc()
// Unsupported notification types should have been caught when the launch plan was being created.
return errors.NewFlyteAdminErrorf(codes.Internal, "Unsupported notification type [%v] for execution [%+v]",
- notification.Type, request.Event.ExecutionId)
+ notification.GetType(), request.GetEvent().GetExecutionId())
}
// Convert the email Notification into an email message to be published.
@@ -1789,19 +1789,19 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request *ad
func (m *ExecutionManager) TerminateExecution(
ctx context.Context, request *admin.ExecutionTerminateRequest) (*admin.ExecutionTerminateResponse, error) {
- if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil {
+ if err := validation.ValidateWorkflowExecutionIdentifier(request.GetId()); err != nil {
logger.Debugf(ctx, "received terminate execution request: %v with invalid identifier: %v", request, err)
return nil, err
}
- ctx = getExecutionContext(ctx, request.Id)
+ ctx = getExecutionContext(ctx, request.GetId())
// Save the abort reason (best effort)
executionModel, err := m.db.ExecutionRepo().Get(ctx, repositoryInterfaces.Identifier{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
})
if err != nil {
- logger.Infof(ctx, "couldn't find execution [%+v] to save termination cause", request.Id)
+ logger.Infof(ctx, "couldn't find execution [%+v] to save termination cause", request.GetId())
return nil, err
}
@@ -1809,24 +1809,24 @@ func (m *ExecutionManager) TerminateExecution(
return nil, errors.NewAlreadyInTerminalStateError(ctx, "Cannot abort an already terminated workflow execution", executionModel.Phase)
}
- err = transformers.SetExecutionAborting(&executionModel, request.Cause, getUser(ctx))
+ err = transformers.SetExecutionAborting(&executionModel, request.GetCause(), getUser(ctx))
if err != nil {
- logger.Debugf(ctx, "failed to add abort metadata for execution [%+v] with err: %v", request.Id, err)
+ logger.Debugf(ctx, "failed to add abort metadata for execution [%+v] with err: %v", request.GetId(), err)
return nil, err
}
err = m.db.ExecutionRepo().Update(ctx, executionModel)
if err != nil {
- logger.Debugf(ctx, "failed to save abort cause for terminated execution: %+v with err: %v", request.Id, err)
+ logger.Debugf(ctx, "failed to save abort cause for terminated execution: %+v with err: %v", request.GetId(), err)
return nil, err
}
workflowExecutor := plugins.Get[workflowengineInterfaces.WorkflowExecutor](m.pluginRegistry, plugins.PluginIDWorkflowExecutor)
err = workflowExecutor.Abort(ctx, workflowengineInterfaces.AbortData{
Namespace: common.GetNamespaceName(
- m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), request.Id.Project, request.Id.Domain),
+ m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), request.GetId().GetProject(), request.GetId().GetDomain()),
- ExecutionID: request.Id,
+ ExecutionID: request.GetId(),
Cluster: executionModel.Cluster,
})
if err != nil {
@@ -1916,7 +1916,7 @@ func (m *ExecutionManager) addProjectLabels(ctx context.Context, projectName str
return nil, err
}
// passing nil domain as not needed to retrieve labels
- projectLabels := transformers.FromProjectModel(project, nil).Labels.GetValues()
+ projectLabels := transformers.FromProjectModel(project, nil).GetLabels().GetValues()
if initialLabels == nil {
initialLabels = make(map[string]string)
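Note on the pattern applied throughout the hunks above: swapping direct field access such as execution.Id.Project for the generated getters execution.GetId().GetProject() relies on the nil-safety of protobuf-generated Go getters, which return the zero value when called on a nil message pointer instead of panicking. A minimal standalone sketch (the Identifier type below is a hypothetical stand-in, not the Flyte-generated message) illustrating the difference:

package main

import "fmt"

// Identifier mimics a protobuf-generated message: pointer-receiver getters
// return zero values when the receiver is nil, mirroring protoc-gen-go output.
type Identifier struct {
	Project string
	Domain  string
}

func (x *Identifier) GetProject() string {
	if x != nil {
		return x.Project
	}
	return ""
}

func main() {
	var id *Identifier // nil, e.g. an unset optional field on an incoming request

	// Nil-safe: prints an empty string rather than panicking.
	fmt.Println(id.GetProject())

	// Direct field access on the same nil pointer would panic at runtime:
	// fmt.Println(id.Project) // panic: invalid memory address or nil pointer dereference
}

This is why the chained calls in the diff (for example request.GetEvent().GetExecutionId()) stay safe even when an intermediate message is unset, whereas the replaced request.Event.ExecutionId form could dereference nil.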
diff --git a/flyteadmin/pkg/manager/impl/execution_manager_test.go b/flyteadmin/pkg/manager/impl/execution_manager_test.go
index 5e874a4589..79068d25ff 100644
--- a/flyteadmin/pkg/manager/impl/execution_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/execution_manager_test.go
@@ -60,7 +60,7 @@ const (
executionClusterLabel = "execution_cluster_label"
)
-var spec = testutils.GetExecutionRequest().Spec
+var spec = testutils.GetExecutionRequest().GetSpec()
var specBytes, _ = proto.Marshal(spec)
var phase = core.WorkflowExecution_RUNNING.String()
var closure = admin.ExecutionClosure{
@@ -94,8 +94,8 @@ var resourceLimits = runtimeInterfaces.TaskResourceSet{
func getLegacySpec() *admin.ExecutionSpec {
executionRequest := testutils.GetExecutionRequest()
- legacySpec := executionRequest.Spec
- legacySpec.Inputs = executionRequest.Inputs
+ legacySpec := executionRequest.GetSpec()
+ legacySpec.Inputs = executionRequest.GetInputs()
return legacySpec
}
@@ -121,7 +121,7 @@ func getExpectedLegacySpecBytes() []byte {
}
func getExpectedSpec() *admin.ExecutionSpec {
- expectedSpec := testutils.GetExecutionRequest().Spec
+ expectedSpec := testutils.GetExecutionRequest().GetSpec()
expectedSpec.Metadata = &admin.ExecutionMetadata{
SystemMetadata: &admin.SystemMetadata{
Namespace: "project-domain",
@@ -138,7 +138,7 @@ func getExpectedSpecBytes() []byte {
func getLegacyClosure() *admin.ExecutionClosure {
return &admin.ExecutionClosure{
Phase: core.WorkflowExecution_RUNNING,
- ComputedInputs: getLegacySpec().Inputs,
+ ComputedInputs: getLegacySpec().GetInputs(),
StateChangeDetails: &admin.ExecutionStateChangeDetails{
State: admin.ExecutionState_EXECUTION_ACTIVE,
OccurredAt: testutils.MockCreatedAtProto,
@@ -153,7 +153,7 @@ func getLegacyClosureBytes() []byte {
func getLegacyExecutionRequest() *admin.ExecutionCreateRequest {
r := testutils.GetExecutionRequest()
- r.Spec.Inputs = r.Inputs
+ r.Spec.Inputs = r.GetInputs()
r.Inputs = nil
return r
}
@@ -193,7 +193,7 @@ func setDefaultLpCallbackForExecTest(repository interfaces.Repository) {
lpSpecBytes, _ := proto.Marshal(lpSpec)
lpClosure := admin.LaunchPlanClosure{
- ExpectedInputs: lpSpec.DefaultInputs,
+ ExpectedInputs: lpSpec.GetDefaultInputs(),
}
lpClosureBytes, _ := proto.Marshal(&lpClosure)
@@ -313,11 +313,11 @@ func TestCreateExecution(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.NoError(t, err)
- assert.Equal(t, principal, spec.Metadata.Principal)
- assert.Equal(t, rawOutput, spec.RawOutputDataConfig.OutputLocationPrefix)
- assert.True(t, proto.Equal(spec.ClusterAssignment, &clusterAssignment))
+ assert.Equal(t, principal, spec.GetMetadata().GetPrincipal())
+ assert.Equal(t, rawOutput, spec.GetRawOutputDataConfig().GetOutputLocationPrefix())
+ assert.True(t, proto.Equal(spec.GetClusterAssignment(), &clusterAssignment))
assert.Equal(t, "launch_plan", input.LaunchEntity)
- assert.Equal(t, spec.GetMetadata().GetSystemMetadata().Namespace, "project-domain")
+ assert.Equal(t, spec.GetMetadata().GetSystemMetadata().GetNamespace(), "project-domain")
return nil
})
setDefaultLpCallbackForExecTest(repository)
@@ -347,10 +347,10 @@ func TestCreateExecution(t *testing.T) {
mockExecutor.OnExecuteMatch(mock.Anything, mock.MatchedBy(func(data workflowengineInterfaces.ExecutionData) bool {
tasks := data.WorkflowClosure.GetTasks()
for _, task := range tasks {
- assert.Equal(t, len(resources.Requests), len(task.Template.GetContainer().Resources.Requests))
- for i, request := range resources.Requests {
- assert.True(t, proto.Equal(request, task.Template.GetContainer().Resources.Requests[i]))
- assert.True(t, proto.Equal(request, task.Template.GetContainer().Resources.Limits[i]))
+ assert.Equal(t, len(resources.GetRequests()), len(task.GetTemplate().GetContainer().GetResources().GetRequests()))
+ for i, request := range resources.GetRequests() {
+ assert.True(t, proto.Equal(request, task.GetTemplate().GetContainer().GetResources().GetRequests()[i]))
+ assert.True(t, proto.Equal(request, task.GetTemplate().GetContainer().GetResources().GetLimits()[i]))
}
}
@@ -401,7 +401,7 @@ func TestCreateExecution(t *testing.T) {
Id: &executionIdentifier,
}
assert.NoError(t, err)
- assert.True(t, proto.Equal(expectedResponse.Id, response.Id))
+ assert.True(t, proto.Equal(expectedResponse.GetId(), response.GetId()))
// TODO: Check for offloaded inputs
}
@@ -436,9 +436,9 @@ func TestCreateExecutionFromWorkflowNode(t *testing.T) {
var clusterLabel = &admin.ExecutionClusterLabel{Value: executionClusterLabel}
repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback(
func(ctx context.Context, input interfaces.Identifier) (models.Execution, error) {
- assert.EqualValues(t, input.Project, parentNodeExecutionID.ExecutionId.Project)
- assert.EqualValues(t, input.Domain, parentNodeExecutionID.ExecutionId.Domain)
- assert.EqualValues(t, input.Name, parentNodeExecutionID.ExecutionId.Name)
+ assert.EqualValues(t, input.Project, parentNodeExecutionID.GetExecutionId().GetProject())
+ assert.EqualValues(t, input.Domain, parentNodeExecutionID.GetExecutionId().GetDomain())
+ assert.EqualValues(t, input.Name, parentNodeExecutionID.GetExecutionId().GetName())
spec := &admin.ExecutionSpec{
Metadata: &admin.ExecutionMetadata{
Nesting: 1,
@@ -463,13 +463,13 @@ func TestCreateExecutionFromWorkflowNode(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.NoError(t, err)
- assert.Equal(t, admin.ExecutionMetadata_CHILD_WORKFLOW, spec.Metadata.Mode)
- assert.True(t, proto.Equal(parentNodeExecutionID, spec.Metadata.ParentNodeExecution))
+ assert.Equal(t, admin.ExecutionMetadata_CHILD_WORKFLOW, spec.GetMetadata().GetMode())
+ assert.True(t, proto.Equal(parentNodeExecutionID, spec.GetMetadata().GetParentNodeExecution()))
assert.EqualValues(t, input.ParentNodeExecutionID, 1)
assert.EqualValues(t, input.SourceExecutionID, 2)
- assert.Equal(t, 2, int(spec.Metadata.Nesting))
- assert.Equal(t, principal, spec.Metadata.Principal)
- assert.Equal(t, executionClusterLabel, spec.ExecutionClusterLabel.Value)
+ assert.Equal(t, 2, int(spec.GetMetadata().GetNesting()))
+ assert.Equal(t, principal, spec.GetMetadata().GetPrincipal())
+ assert.Equal(t, executionClusterLabel, spec.GetExecutionClusterLabel().GetValue())
assert.Equal(t, principal, input.User)
return nil
},
@@ -505,14 +505,14 @@ func TestCreateExecution_NoAssignedName(t *testing.T) {
setDefaultLpCallbackForExecTest(repository)
repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(
func(ctx context.Context, input models.Execution) error {
- assert.Equal(t, executionIdentifier.Project, input.Project)
- assert.Equal(t, executionIdentifier.Domain, input.Domain)
+ assert.Equal(t, executionIdentifier.GetProject(), input.Project)
+ assert.Equal(t, executionIdentifier.GetDomain(), input.Domain)
assert.NotEmpty(t, input.Name)
return nil
})
mockExecutor := workflowengineMocks.WorkflowExecutor{}
mockExecutor.OnExecuteMatch(mock.Anything, mock.MatchedBy(func(data workflowengineInterfaces.ExecutionData) bool {
- return len(data.ExecutionID.Name) > 0
+ return len(data.ExecutionID.GetName()) > 0
})).Return(workflowengineInterfaces.ExecutionResponse{
Cluster: testCluster,
}, nil)
@@ -530,9 +530,9 @@ func TestCreateExecution_NoAssignedName(t *testing.T) {
Id: &executionIdentifier,
}
assert.Nil(t, err)
- assert.Equal(t, expectedResponse.Id.Project, response.Id.Project)
- assert.Equal(t, expectedResponse.Id.Domain, response.Id.Domain)
- assert.NotEmpty(t, response.Id.Name)
+ assert.Equal(t, expectedResponse.GetId().GetProject(), response.GetId().GetProject())
+ assert.Equal(t, expectedResponse.GetId().GetDomain(), response.GetId().GetDomain())
+ assert.NotEmpty(t, response.GetId().GetName())
}
func TestCreateExecution_TaggedQueue(t *testing.T) {
@@ -558,11 +558,11 @@ func TestCreateExecution_TaggedQueue(t *testing.T) {
mockExecutor := workflowengineMocks.WorkflowExecutor{}
mockExecutor.OnExecuteMatch(mock.Anything, mock.MatchedBy(func(data workflowengineInterfaces.ExecutionData) bool {
- assert.NotEmpty(t, data.WorkflowClosure.Tasks)
- for _, task := range data.WorkflowClosure.Tasks {
- assert.Len(t, task.Template.GetContainer().Config, 1)
- assert.Contains(t, childContainerQueueKey, task.Template.GetContainer().Config[0].Key)
- assert.Contains(t, "dynamic Q", task.Template.GetContainer().Config[0].Value)
+ assert.NotEmpty(t, data.WorkflowClosure.GetTasks())
+ for _, task := range data.WorkflowClosure.GetTasks() {
+ assert.Len(t, task.GetTemplate().GetContainer().GetConfig(), 1)
+ assert.Contains(t, childContainerQueueKey, task.GetTemplate().GetContainer().GetConfig()[0].GetKey())
+ assert.Contains(t, "dynamic Q", task.GetTemplate().GetContainer().GetConfig()[0].GetValue())
}
return true
})).Return(workflowengineInterfaces.ExecutionResponse{
@@ -720,14 +720,14 @@ func TestCreateExecutionVerifyDbModel(t *testing.T) {
if err != nil {
return err
}
- assert.Nil(t, specValue.Inputs)
+ assert.Nil(t, specValue.GetInputs())
var closureValue admin.ExecutionClosure
err = proto.Unmarshal(input.Closure, &closureValue)
if err != nil {
return err
}
- assert.Nil(t, closureValue.ComputedInputs)
+ assert.Nil(t, closureValue.GetComputedInputs())
var userInputs, inputs core.LiteralMap
if err := storageClient.ReadProtobuf(ctx, input.UserInputsURI, &userInputs); err != nil {
@@ -737,19 +737,19 @@ func TestCreateExecutionVerifyDbModel(t *testing.T) {
return err
}
fooValue := coreutils.MustMakeLiteral("foo-value-1")
- assert.Equal(t, 1, len(userInputs.Literals))
- assert.EqualValues(t, userInputs.Literals["foo"], fooValue)
+ assert.Equal(t, 1, len(userInputs.GetLiterals()))
+ assert.EqualValues(t, userInputs.GetLiterals()["foo"], fooValue)
barValue := coreutils.MustMakeLiteral("bar-value")
- assert.Equal(t, len(inputs.Literals), 2)
- assert.EqualValues(t, inputs.Literals["foo"], fooValue)
- assert.EqualValues(t, inputs.Literals["bar"], barValue)
- assert.Equal(t, core.WorkflowExecution_UNDEFINED, closureValue.Phase)
+ assert.Equal(t, len(inputs.GetLiterals()), 2)
+ assert.EqualValues(t, inputs.GetLiterals()["foo"], fooValue)
+ assert.EqualValues(t, inputs.GetLiterals()["bar"], barValue)
+ assert.Equal(t, core.WorkflowExecution_UNDEFINED, closureValue.GetPhase())
assert.Equal(t, createdAt, *input.ExecutionCreatedAt)
- assert.Equal(t, 1, len(closureValue.Notifications))
- assert.Equal(t, 1, len(closureValue.Notifications[0].Phases))
- assert.Equal(t, request.Spec.GetNotifications().Notifications[0].Phases[0], closureValue.Notifications[0].Phases[0])
- assert.IsType(t, &admin.Notification_Slack{}, closureValue.Notifications[0].GetType())
- assert.Equal(t, request.Spec.GetNotifications().Notifications[0].GetSlack().RecipientsEmail, closureValue.Notifications[0].GetSlack().RecipientsEmail)
+ assert.Equal(t, 1, len(closureValue.GetNotifications()))
+ assert.Equal(t, 1, len(closureValue.GetNotifications()[0].GetPhases()))
+ assert.Equal(t, request.GetSpec().GetNotifications().GetNotifications()[0].GetPhases()[0], closureValue.GetNotifications()[0].GetPhases()[0])
+ assert.IsType(t, &admin.Notification_Slack{}, closureValue.GetNotifications()[0].GetType())
+ assert.Equal(t, request.GetSpec().GetNotifications().GetNotifications()[0].GetSlack().GetRecipientsEmail(), closureValue.GetNotifications()[0].GetSlack().GetRecipientsEmail())
return nil
}
@@ -766,7 +766,7 @@ func TestCreateExecutionVerifyDbModel(t *testing.T) {
response, err := execManager.CreateExecution(context.Background(), request, requestedAt)
assert.Nil(t, err)
- assert.True(t, proto.Equal(&executionIdentifier, response.Id))
+ assert.True(t, proto.Equal(&executionIdentifier, response.GetId()))
}
func TestCreateExecutionDefaultNotifications(t *testing.T) {
@@ -790,10 +790,10 @@ func TestCreateExecutionDefaultNotifications(t *testing.T) {
return err
}
- assert.Equal(t, 1, len(closureValue.Notifications))
- assert.Equal(t, 1, len(closureValue.Notifications[0].Phases))
- assert.Equal(t, core.WorkflowExecution_SUCCEEDED, closureValue.Notifications[0].Phases[0])
- assert.IsType(t, &admin.Notification_Email{}, closureValue.Notifications[0].GetType())
+ assert.Equal(t, 1, len(closureValue.GetNotifications()))
+ assert.Equal(t, 1, len(closureValue.GetNotifications()[0].GetPhases()))
+ assert.Equal(t, core.WorkflowExecution_SUCCEEDED, closureValue.GetNotifications()[0].GetPhases()[0])
+ assert.IsType(t, &admin.Notification_Email{}, closureValue.GetNotifications()[0].GetType())
return nil
}
@@ -811,7 +811,7 @@ func TestCreateExecutionDefaultNotifications(t *testing.T) {
Project: "project",
Domain: "domain",
Name: "name",
- }, response.Id))
+ }, response.GetId()))
}
func TestCreateExecutionDisableNotifications(t *testing.T) {
@@ -833,7 +833,7 @@ func TestCreateExecutionDisableNotifications(t *testing.T) {
return err
}
- assert.Empty(t, closureValue.Notifications)
+ assert.Empty(t, closureValue.GetNotifications())
return nil
}
repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc)
@@ -850,7 +850,7 @@ func TestCreateExecutionDisableNotifications(t *testing.T) {
Project: "project",
Domain: "domain",
Name: "name",
- }, response.Id))
+ }, response.GetId()))
}
func TestCreateExecutionNoNotifications(t *testing.T) {
@@ -868,7 +868,7 @@ func TestCreateExecutionNoNotifications(t *testing.T) {
lpSpec.EntityMetadata.Notifications = nil
lpSpecBytes, _ := proto.Marshal(lpSpec)
lpClosure := admin.LaunchPlanClosure{
- ExpectedInputs: lpSpec.DefaultInputs,
+ ExpectedInputs: lpSpec.GetDefaultInputs(),
}
lpClosureBytes, _ := proto.Marshal(&lpClosure)
@@ -920,7 +920,7 @@ func TestCreateExecutionNoNotifications(t *testing.T) {
Project: "project",
Domain: "domain",
Name: "name",
- }, response.Id))
+ }, response.GetId()))
}
func TestCreateExecutionDynamicLabelsAndAnnotations(t *testing.T) {
@@ -1209,8 +1209,8 @@ func TestCreateExecutionWithEnvs(t *testing.T) {
assert.Equal(t, uint(0), input.TaskID)
}
if len(tt.envs) != 0 {
- assert.Equal(t, tt.envs[0].Key, spec.GetEnvs().Values[0].Key)
- assert.Equal(t, tt.envs[0].Value, spec.GetEnvs().Values[0].Value)
+ assert.Equal(t, tt.envs[0].GetKey(), spec.GetEnvs().GetValues()[0].GetKey())
+ assert.Equal(t, tt.envs[0].GetValue(), spec.GetEnvs().GetValues()[0].GetValue())
} else {
assert.Nil(t, spec.GetEnvs().GetValues())
}
@@ -1244,7 +1244,7 @@ func TestCreateExecution_CustomNamespaceMappingConfig(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.NoError(t, err)
- assert.Equal(t, spec.GetMetadata().GetSystemMetadata().Namespace, "project")
+ assert.Equal(t, spec.GetMetadata().GetSystemMetadata().GetNamespace(), "project")
return nil
}
@@ -1272,7 +1272,7 @@ func TestCreateExecution_CustomNamespaceMappingConfig(t *testing.T) {
response, err := execManager.CreateExecution(context.Background(), request, requestedAt)
assert.Nil(t, err)
- assert.True(t, proto.Equal(&executionIdentifier, response.Id))
+ assert.True(t, proto.Equal(&executionIdentifier, response.GetId()))
}
func makeExecutionGetFunc(
@@ -1341,7 +1341,7 @@ func makeExecutionInterruptibleGetFunc(
request.Spec.Interruptible = &wrappers.BoolValue{Value: *interruptible}
}
- specBytes, err := proto.Marshal(request.Spec)
+ specBytes, err := proto.Marshal(request.GetSpec())
assert.Nil(t, err)
return models.Execution{
@@ -1374,7 +1374,7 @@ func makeExecutionOverwriteCacheGetFunc(
request := testutils.GetExecutionRequest()
request.Spec.OverwriteCache = overwriteCache
- specBytes, err := proto.Marshal(request.Spec)
+ specBytes, err := proto.Marshal(request.GetSpec())
assert.Nil(t, err)
return models.Execution{
@@ -1407,7 +1407,7 @@ func makeExecutionWithEnvs(
request := testutils.GetExecutionRequest()
request.Spec.Envs.Values = envs
- specBytes, err := proto.Marshal(request.Spec)
+ specBytes, err := proto.Marshal(request.GetSpec())
assert.Nil(t, err)
return models.Execution{
@@ -1460,7 +1460,7 @@ func TestRelaunchExecution(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode)
return nil
}
@@ -1600,7 +1600,7 @@ func TestRelaunchExecutionInterruptibleOverride(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode)
assert.NotNil(t, spec.GetInterruptible())
assert.True(t, spec.GetInterruptible().GetValue())
@@ -1652,7 +1652,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode)
assert.True(t, spec.GetOverwriteCache())
return nil
@@ -1687,7 +1687,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode)
assert.False(t, spec.GetOverwriteCache())
return nil
@@ -1722,7 +1722,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode)
assert.False(t, spec.GetOverwriteCache())
return nil
@@ -1774,11 +1774,11 @@ func TestRelaunchExecutionEnvsOverride(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode)
assert.NotNil(t, spec.GetEnvs())
- assert.Equal(t, spec.GetEnvs().Values[0].Key, env[0].Key)
- assert.Equal(t, spec.GetEnvs().Values[0].Value, env[0].Value)
+ assert.Equal(t, spec.GetEnvs().GetValues()[0].GetKey(), env[0].GetKey())
+ assert.Equal(t, spec.GetEnvs().GetValues()[0].GetValue(), env[0].GetValue())
return nil
}
repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc)
@@ -1825,7 +1825,7 @@ func TestRecoverExecution(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode)
return nil
}
@@ -1904,7 +1904,7 @@ func TestRecoverExecution_RecoveredChildNode(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode)
assert.Equal(t, parentNodeDatabaseID, input.ParentNodeExecutionID)
assert.Equal(t, referencedExecutionID, input.SourceExecutionID)
@@ -2067,7 +2067,7 @@ func TestRecoverExecutionInterruptibleOverride(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode)
assert.NotNil(t, spec.GetInterruptible())
assert.True(t, spec.GetInterruptible().GetValue())
@@ -2129,7 +2129,7 @@ func TestRecoverExecutionOverwriteCacheOverride(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode)
assert.True(t, spec.GetOverwriteCache())
return nil
@@ -2189,11 +2189,11 @@ func TestRecoverExecutionEnvsOverride(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode)
+ assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode)
assert.NotNil(t, spec.GetEnvs())
- assert.Equal(t, spec.GetEnvs().GetValues()[0].Key, env[0].Key)
- assert.Equal(t, spec.GetEnvs().GetValues()[0].Value, env[0].Value)
+ assert.Equal(t, spec.GetEnvs().GetValues()[0].GetKey(), env[0].GetKey())
+ assert.Equal(t, spec.GetEnvs().GetValues()[0].GetValue(), env[0].GetValue())
return nil
}
repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc)
@@ -2843,9 +2843,9 @@ func TestGetExecution(t *testing.T) {
Id: &executionIdentifier,
})
assert.NoError(t, err)
- assert.True(t, proto.Equal(&executionIdentifier, execution.Id))
- assert.True(t, proto.Equal(getExpectedSpec(), execution.Spec))
- assert.True(t, proto.Equal(&closure, execution.Closure))
+ assert.True(t, proto.Equal(&executionIdentifier, execution.GetId()))
+ assert.True(t, proto.Equal(getExpectedSpec(), execution.GetSpec()))
+ assert.True(t, proto.Equal(&closure, execution.GetClosure()))
}
func TestGetExecution_DatabaseError(t *testing.T) {
@@ -3070,18 +3070,18 @@ func TestListExecutions(t *testing.T) {
})
assert.NoError(t, err)
assert.NotNil(t, executionList)
- assert.Len(t, executionList.Executions, 2)
+ assert.Len(t, executionList.GetExecutions(), 2)
- for idx, execution := range executionList.Executions {
- assert.Equal(t, projectValue, execution.Id.Project)
- assert.Equal(t, domainValue, execution.Id.Domain)
+ for idx, execution := range executionList.GetExecutions() {
+ assert.Equal(t, projectValue, execution.GetId().GetProject())
+ assert.Equal(t, domainValue, execution.GetId().GetDomain())
if idx == 0 {
- assert.Equal(t, "my awesome execution", execution.Id.Name)
+ assert.Equal(t, "my awesome execution", execution.GetId().GetName())
}
- assert.True(t, proto.Equal(getExpectedSpec(), execution.Spec))
- assert.True(t, proto.Equal(&closure, execution.Closure))
+ assert.True(t, proto.Equal(getExpectedSpec(), execution.GetSpec()))
+ assert.True(t, proto.Equal(&closure, execution.GetClosure()))
}
- assert.Empty(t, executionList.Token)
+ assert.Empty(t, executionList.GetToken())
}
func TestListExecutions_MissingParameters(t *testing.T) {
@@ -3212,7 +3212,7 @@ func TestExecutionManager_PublishNotifications(t *testing.T) {
},
}
var execClosure = &admin.ExecutionClosure{
- Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications,
+ Notifications: testutils.GetExecutionRequest().GetSpec().GetNotifications().GetNotifications(),
WorkflowId: &core.Identifier{
ResourceType: core.ResourceType_WORKFLOW,
Project: "wf_project",
@@ -3248,8 +3248,8 @@ func TestExecutionManager_PublishNotifications(t *testing.T) {
},
},
}
- execClosure.Notifications = append(execClosure.Notifications, extraNotifications[0])
- execClosure.Notifications = append(execClosure.Notifications, extraNotifications[1])
+ execClosure.Notifications = append(execClosure.GetNotifications(), extraNotifications[0])
+ execClosure.Notifications = append(execClosure.GetNotifications(), extraNotifications[1])
execClosureBytes, _ := proto.Marshal(execClosure)
executionModel := models.Execution{
@@ -3351,7 +3351,7 @@ func TestExecutionManager_TestExecutionManager_PublishNotificationsTransformErro
},
}
var execClosure = &admin.ExecutionClosure{
- Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications,
+ Notifications: testutils.GetExecutionRequest().GetSpec().GetNotifications().GetNotifications(),
WorkflowId: &core.Identifier{
ResourceType: core.ResourceType_WORKFLOW,
Project: "wf_project",
@@ -3402,7 +3402,7 @@ func TestExecutionManager_PublishNotificationsNoPhaseMatch(t *testing.T) {
},
}
var execClosure = &admin.ExecutionClosure{
- Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications,
+ Notifications: testutils.GetExecutionRequest().GetSpec().GetNotifications().GetNotifications(),
}
execClosureBytes, _ := proto.Marshal(execClosure)
executionModel := models.Execution{
@@ -3723,12 +3723,12 @@ func TestAddPluginOverrides(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, taskPluginOverrides, 2)
for _, override := range taskPluginOverrides {
- if override.TaskType == "python" {
- assert.EqualValues(t, []string{"plugin a"}, override.PluginId)
- } else if override.TaskType == "hive" {
- assert.EqualValues(t, []string{"plugin b"}, override.PluginId)
+ if override.GetTaskType() == "python" {
+ assert.EqualValues(t, []string{"plugin a"}, override.GetPluginId())
+ } else if override.GetTaskType() == "hive" {
+ assert.EqualValues(t, []string{"plugin b"}, override.GetPluginId())
} else {
- t.Errorf("Unexpected task type [%s] plugin override committed to db", override.TaskType)
+ t.Errorf("Unexpected task type [%s] plugin override committed to db", override.GetTaskType())
}
}
}
@@ -3788,9 +3788,9 @@ func TestGetExecution_Legacy(t *testing.T) {
Id: &executionIdentifier,
})
assert.NoError(t, err)
- assert.True(t, proto.Equal(&executionIdentifier, execution.Id))
- assert.True(t, proto.Equal(getExpectedLegacySpec(), execution.Spec))
- assert.True(t, proto.Equal(getLegacyClosure(), execution.Closure))
+ assert.True(t, proto.Equal(&executionIdentifier, execution.GetId()))
+ assert.True(t, proto.Equal(getExpectedLegacySpec(), execution.GetSpec()))
+ assert.True(t, proto.Equal(getLegacyClosure(), execution.GetClosure()))
}
func TestGetExecutionData_LegacyModel(t *testing.T) {
@@ -3870,7 +3870,7 @@ func TestGetExecutionData_LegacyModel(t *testing.T) {
var inputs core.LiteralMap
err = storageClient.ReadProtobuf(context.Background(), storage.DataReference("s3://bucket/metadata/project/domain/name/inputs"), &inputs)
assert.Nil(t, err)
- assert.True(t, proto.Equal(&inputs, closure.ComputedInputs))
+ assert.True(t, proto.Equal(&inputs, closure.GetComputedInputs()))
}
func TestCreateExecution_LegacyClient(t *testing.T) {
@@ -3937,10 +3937,10 @@ func TestRelaunchExecution_LegacyModel(t *testing.T) {
var spec admin.ExecutionSpec
err := proto.Unmarshal(input.Spec, &spec)
assert.Nil(t, err)
- assert.Equal(t, "default_raw_output", spec.RawOutputDataConfig.OutputLocationPrefix)
- assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode)
+ assert.Equal(t, "default_raw_output", spec.GetRawOutputDataConfig().GetOutputLocationPrefix())
+ assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.GetMetadata().GetMode())
assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode)
- assert.True(t, proto.Equal(spec.Inputs, getLegacySpec().Inputs))
+ assert.True(t, proto.Equal(spec.GetInputs(), getLegacySpec().GetInputs()))
return nil
}
repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc)
@@ -3971,12 +3971,12 @@ func TestRelaunchExecution_LegacyModel(t *testing.T) {
var userInputs core.LiteralMap
err = storageClient.ReadProtobuf(context.Background(), "s3://bucket/metadata/project/domain/relaunchy/user_inputs", &userInputs)
assert.Nil(t, err)
- assert.True(t, proto.Equal(&userInputs, getLegacySpec().Inputs))
+ assert.True(t, proto.Equal(&userInputs, getLegacySpec().GetInputs()))
var inputs core.LiteralMap
err = storageClient.ReadProtobuf(context.Background(), "s3://bucket/metadata/project/domain/relaunchy/inputs", &inputs)
assert.Nil(t, err)
- assert.True(t, proto.Equal(&inputs, existingClosure.ComputedInputs))
+ assert.True(t, proto.Equal(&inputs, existingClosure.GetComputedInputs()))
}
func TestListExecutions_LegacyModel(t *testing.T) {
@@ -4052,18 +4052,18 @@ func TestListExecutions_LegacyModel(t *testing.T) {
})
assert.NoError(t, err)
assert.NotNil(t, executionList)
- assert.Len(t, executionList.Executions, 2)
+ assert.Len(t, executionList.GetExecutions(), 2)
- for idx, execution := range executionList.Executions {
- assert.Equal(t, projectValue, execution.Id.Project)
- assert.Equal(t, domainValue, execution.Id.Domain)
+ for idx, execution := range executionList.GetExecutions() {
+ assert.Equal(t, projectValue, execution.GetId().GetProject())
+ assert.Equal(t, domainValue, execution.GetId().GetDomain())
if idx == 0 {
- assert.Equal(t, "my awesome execution", execution.Id.Name)
+ assert.Equal(t, "my awesome execution", execution.GetId().GetName())
}
- assert.True(t, proto.Equal(spec, execution.Spec))
- assert.True(t, proto.Equal(&closure, execution.Closure))
+ assert.True(t, proto.Equal(spec, execution.GetSpec()))
+ assert.True(t, proto.Equal(&closure, execution.GetClosure()))
}
- assert.Empty(t, executionList.Token)
+ assert.Empty(t, executionList.GetToken())
}
func TestSetDefaults(t *testing.T) {
@@ -4148,7 +4148,7 @@ func TestSetDefaults(t *testing.T) {
},
},
},
- task.Template.GetContainer()), fmt.Sprintf("%+v", task.Template.GetContainer()))
+ task.GetTemplate().GetContainer()), fmt.Sprintf("%+v", task.GetTemplate().GetContainer()))
}
func TestSetDefaults_MissingRequests_ExistingRequestsPreserved(t *testing.T) {
@@ -4224,7 +4224,7 @@ func TestSetDefaults_MissingRequests_ExistingRequestsPreserved(t *testing.T) {
},
},
},
- task.Template.GetContainer()), fmt.Sprintf("%+v", task.Template.GetContainer()))
+ task.GetTemplate().GetContainer()), fmt.Sprintf("%+v", task.GetTemplate().GetContainer()))
}
func TestSetDefaults_OptionalRequiredResources(t *testing.T) {
@@ -4288,7 +4288,7 @@ func TestSetDefaults_OptionalRequiredResources(t *testing.T) {
},
},
},
- task.Template.GetContainer()), fmt.Sprintf("%+v", task.Template.GetContainer()))
+ task.GetTemplate().GetContainer()), fmt.Sprintf("%+v", task.GetTemplate().GetContainer()))
})
t.Run("respect non-required resources when defaults exist in config", func(t *testing.T) {
@@ -4336,7 +4336,7 @@ func TestSetDefaults_OptionalRequiredResources(t *testing.T) {
},
},
},
- task.Template.GetContainer()), fmt.Sprintf("%+v", task.Template.GetContainer()))
+ task.GetTemplate().GetContainer()), fmt.Sprintf("%+v", task.GetTemplate().GetContainer()))
})
}
@@ -4472,7 +4472,7 @@ func TestCreateSingleTaskExecution(t *testing.T) {
}, input.ExecutionKey)
assert.Equal(t, "task", input.LaunchEntity)
assert.Equal(t, "UNDEFINED", input.Phase)
- assert.True(t, proto.Equal(taskIdentifier, spec.LaunchPlan))
+ assert.True(t, proto.Equal(taskIdentifier, spec.GetLaunchPlan()))
return nil
})
@@ -4583,10 +4583,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
// two requests will be made, one with empty domain and one with filled in domain
assert.Contains(t, []managerInterfaces.ResourceRequest{{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG,
- }, {Project: workflowIdentifier.Project,
+ }, {Project: workflowIdentifier.GetProject(),
Domain: "",
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG},
}, request)
@@ -4631,8 +4631,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
t.Run("request with full config", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
Labels: &admin.Labels{Values: requestLabels},
Annotations: &admin.Annotations{Values: requestAnnotations},
@@ -4656,20 +4656,20 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
ctx := identityContext.WithContext(context.Background())
execConfig, err := executionManager.getExecutionConfig(ctx, request, nil)
assert.NoError(t, err)
- assert.Equal(t, requestMaxParallelism, execConfig.MaxParallelism)
- assert.Equal(t, requestK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
- assert.Equal(t, requestInterruptible, execConfig.Interruptible.Value)
- assert.Equal(t, requestOverwriteCache, execConfig.OverwriteCache)
- assert.Equal(t, requestOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix)
- assert.Equal(t, requestLabels, execConfig.GetLabels().Values)
- assert.Equal(t, requestAnnotations, execConfig.GetAnnotations().Values)
+ assert.Equal(t, requestMaxParallelism, execConfig.GetMaxParallelism())
+ assert.Equal(t, requestK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
+ assert.Equal(t, requestInterruptible, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, requestOverwriteCache, execConfig.GetOverwriteCache())
+ assert.Equal(t, requestOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix())
+ assert.Equal(t, requestLabels, execConfig.GetLabels().GetValues())
+ assert.Equal(t, requestAnnotations, execConfig.GetAnnotations().GetValues())
assert.Equal(t, "yeee", execConfig.GetSecurityContext().GetRunAs().GetExecutionIdentity())
- assert.Equal(t, requestEnvironmentVariables, execConfig.GetEnvs().Values)
+ assert.Equal(t, requestEnvironmentVariables, execConfig.GetEnvs().GetValues())
})
t.Run("request with partial config", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
Labels: &admin.Labels{Values: requestLabels},
RawOutputDataConfig: &admin.RawOutputDataConfig{
@@ -4697,19 +4697,19 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, requestMaxParallelism, execConfig.MaxParallelism)
- assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value)
- assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache)
- assert.True(t, proto.Equal(launchPlan.Spec.SecurityContext, execConfig.SecurityContext))
- assert.True(t, proto.Equal(launchPlan.Spec.Annotations, execConfig.Annotations))
- assert.Equal(t, requestOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix)
- assert.Equal(t, requestLabels, execConfig.GetLabels().Values)
- assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().Values)
+ assert.Equal(t, requestMaxParallelism, execConfig.GetMaxParallelism())
+ assert.Equal(t, launchPlanInterruptible, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, launchPlanOverwriteCache, execConfig.GetOverwriteCache())
+ assert.True(t, proto.Equal(launchPlan.GetSpec().GetSecurityContext(), execConfig.GetSecurityContext()))
+ assert.True(t, proto.Equal(launchPlan.GetSpec().GetAnnotations(), execConfig.GetAnnotations()))
+ assert.Equal(t, requestOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix())
+ assert.Equal(t, requestLabels, execConfig.GetLabels().GetValues())
+ assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().GetValues())
})
t.Run("request with empty security context", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
SecurityContext: &core.SecurityContext{
RunAs: &core.Identity{
@@ -4737,18 +4737,18 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism)
- assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value)
- assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache)
- assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
- assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix)
- assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values)
- assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().Values)
+ assert.Equal(t, launchPlanMaxParallelism, execConfig.GetMaxParallelism())
+ assert.Equal(t, launchPlanInterruptible, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, launchPlanOverwriteCache, execConfig.GetOverwriteCache())
+ assert.Equal(t, launchPlanK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
+ assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix())
+ assert.Equal(t, launchPlanLabels, execConfig.GetLabels().GetValues())
+ assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().GetValues())
})
t.Run("request with no config", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
launchPlan := &admin.LaunchPlan{
@@ -4771,19 +4771,19 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism)
- assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value)
- assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache)
- assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
- assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix)
- assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values)
- assert.Equal(t, launchPlanAnnotations, execConfig.GetAnnotations().Values)
- assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().Values)
+ assert.Equal(t, launchPlanMaxParallelism, execConfig.GetMaxParallelism())
+ assert.Equal(t, launchPlanInterruptible, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, launchPlanOverwriteCache, execConfig.GetOverwriteCache())
+ assert.Equal(t, launchPlanK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
+ assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix())
+ assert.Equal(t, launchPlanLabels, execConfig.GetLabels().GetValues())
+ assert.Equal(t, launchPlanAnnotations, execConfig.GetAnnotations().GetValues())
+ assert.Equal(t, launchPlanEnvironmentVariables, execConfig.GetEnvs().GetValues())
})
t.Run("launchplan with partial config", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
launchPlan := &admin.LaunchPlan{
@@ -4803,18 +4803,18 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism)
- assert.Equal(t, rmInterruptible, execConfig.Interruptible.Value)
- assert.Equal(t, rmOverwriteCache, execConfig.OverwriteCache)
- assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
- assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix)
- assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values)
- assert.Equal(t, launchPlanAnnotations, execConfig.GetAnnotations().Values)
+ assert.Equal(t, launchPlanMaxParallelism, execConfig.GetMaxParallelism())
+ assert.Equal(t, rmInterruptible, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, rmOverwriteCache, execConfig.GetOverwriteCache())
+ assert.Equal(t, launchPlanK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
+ assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix())
+ assert.Equal(t, launchPlanLabels, execConfig.GetLabels().GetValues())
+ assert.Equal(t, launchPlanAnnotations, execConfig.GetAnnotations().GetValues())
})
t.Run("launchplan with no config", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
launchPlan := &admin.LaunchPlan{
@@ -4822,23 +4822,23 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, rmMaxParallelism, execConfig.MaxParallelism)
- assert.Equal(t, rmInterruptible, execConfig.Interruptible.Value)
- assert.Equal(t, rmOverwriteCache, execConfig.OverwriteCache)
- assert.Equal(t, rmK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
- assert.Equal(t, rmOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix)
- assert.Equal(t, rmLabels, execConfig.GetLabels().Values)
- assert.Equal(t, rmAnnotations, execConfig.GetAnnotations().Values)
+ assert.Equal(t, rmMaxParallelism, execConfig.GetMaxParallelism())
+ assert.Equal(t, rmInterruptible, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, rmOverwriteCache, execConfig.GetOverwriteCache())
+ assert.Equal(t, rmK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
+ assert.Equal(t, rmOutputLocationPrefix, execConfig.GetRawOutputDataConfig().GetOutputLocationPrefix())
+ assert.Equal(t, rmLabels, execConfig.GetLabels().GetValues())
+ assert.Equal(t, rmAnnotations, execConfig.GetAnnotations().GetValues())
assert.Nil(t, execConfig.GetEnvs())
})
t.Run("matchable resource partial config", func(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.Contains(t, []managerInterfaces.ResourceRequest{{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG,
- }, {Project: workflowIdentifier.Project,
+ }, {Project: workflowIdentifier.GetProject(),
Domain: "",
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG},
}, request)
@@ -4860,8 +4860,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}, nil
}
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
launchPlan := &admin.LaunchPlan{
@@ -4869,23 +4869,23 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, rmMaxParallelism, execConfig.MaxParallelism)
+ assert.Equal(t, rmMaxParallelism, execConfig.GetMaxParallelism())
assert.Nil(t, execConfig.GetInterruptible())
- assert.False(t, execConfig.OverwriteCache)
- assert.Equal(t, rmK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.False(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, rmK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
- assert.Equal(t, rmAnnotations, execConfig.GetAnnotations().Values)
+ assert.Equal(t, rmAnnotations, execConfig.GetAnnotations().GetValues())
assert.Nil(t, execConfig.GetEnvs())
})
t.Run("matchable resource with no config", func(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.Contains(t, []managerInterfaces.ResourceRequest{{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG,
- }, {Project: workflowIdentifier.Project,
+ }, {Project: workflowIdentifier.GetProject(),
Domain: "",
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG},
}, request)
@@ -4898,8 +4898,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}, nil
}
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
launchPlan := &admin.LaunchPlan{
@@ -4907,10 +4907,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
assert.Nil(t, execConfig.GetInterruptible())
- assert.False(t, execConfig.OverwriteCache)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.False(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
@@ -4920,10 +4920,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.Contains(t, []managerInterfaces.ResourceRequest{{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG,
- }, {Project: workflowIdentifier.Project,
+ }, {Project: workflowIdentifier.GetProject(),
Domain: "",
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG},
}, request)
@@ -4937,8 +4937,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}, nil
}
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
launchPlan := &admin.LaunchPlan{
@@ -4950,10 +4950,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
assert.Nil(t, execConfig.GetInterruptible())
- assert.False(t, execConfig.OverwriteCache)
- assert.Equal(t, deprecatedLaunchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.False(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, deprecatedLaunchPlanK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
@@ -4963,11 +4963,11 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.Contains(t, []managerInterfaces.ResourceRequest{{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG,
- Workflow: workflowIdentifier.Name,
- }, {Project: workflowIdentifier.Project,
+ Workflow: workflowIdentifier.GetName(),
+ }, {Project: workflowIdentifier.GetProject(),
Domain: "",
Workflow: "",
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG},
@@ -4991,23 +4991,23 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
}, nil
}
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
launchPlan := &admin.LaunchPlan{
Spec: &admin.LaunchPlanSpec{
WorkflowId: &core.Identifier{
- Name: workflowIdentifier.Name,
+ Name: workflowIdentifier.GetName(),
},
},
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, int32(300), execConfig.MaxParallelism)
- assert.True(t, execConfig.Interruptible.Value)
- assert.True(t, execConfig.OverwriteCache)
- assert.Equal(t, "workflowDefault", execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, int32(300), execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetInterruptible().GetValue())
+ assert.True(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, "workflowDefault", execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
@@ -5017,18 +5017,18 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.Contains(t, []managerInterfaces.ResourceRequest{{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG,
- }, {Project: workflowIdentifier.Project,
+ }, {Project: workflowIdentifier.GetProject(),
Domain: "",
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG},
}, request)
return nil, fmt.Errorf("failed to fetch the resources")
}
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
launchPlan := &admin.LaunchPlan{
@@ -5049,10 +5049,10 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.Contains(t, []managerInterfaces.ResourceRequest{{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG,
- }, {Project: workflowIdentifier.Project,
+ }, {Project: workflowIdentifier.GetProject(),
Domain: "",
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG},
}, request)
@@ -5070,8 +5070,8 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
t.Run("request with interruptible override disabled", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
Interruptible: &wrappers.BoolValue{Value: false},
},
@@ -5079,17 +5079,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.False(t, execConfig.Interruptible.Value)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.False(t, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("request with interruptible override enabled", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
Interruptible: &wrappers.BoolValue{Value: true},
},
@@ -5097,33 +5097,33 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.Interruptible.Value)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("request with no interruptible override specified", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.Interruptible.Value)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("launch plan with interruptible override disabled", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
@@ -5135,17 +5135,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.False(t, execConfig.Interruptible.Value)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.False(t, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("launch plan with interruptible override enabled", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
@@ -5158,20 +5158,20 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.Interruptible.Value)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
- assert.Equal(t, 1, len(execConfig.Envs.Values))
- assert.Equal(t, "foo", execConfig.Envs.Values[0].Key)
- assert.Equal(t, "bar", execConfig.Envs.Values[0].Value)
+ assert.Equal(t, 1, len(execConfig.GetEnvs().GetValues()))
+ assert.Equal(t, "foo", execConfig.GetEnvs().GetValues()[0].GetKey())
+ assert.Equal(t, "bar", execConfig.GetEnvs().GetValues()[0].GetValue())
})
t.Run("launch plan with no interruptible override specified", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
@@ -5181,17 +5181,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.Interruptible.Value)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("request and launch plan with different interruptible overrides", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
Interruptible: &wrappers.BoolValue{Value: true},
},
@@ -5205,17 +5205,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.Interruptible.Value)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetInterruptible().GetValue())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("request with skip cache override enabled", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
OverwriteCache: true,
},
@@ -5223,33 +5223,33 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.OverwriteCache)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("request with no skip cache override specified", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.OverwriteCache)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("launch plan with skip cache override enabled", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
@@ -5261,17 +5261,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.OverwriteCache)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("launch plan with no skip cache override specified", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
@@ -5281,17 +5281,17 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.OverwriteCache)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
})
t.Run("request and launch plan with different skip cache overrides", func(t *testing.T) {
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
OverwriteCache: true,
},
@@ -5305,9 +5305,9 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan)
assert.NoError(t, err)
- assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism)
- assert.True(t, execConfig.OverwriteCache)
- assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, defaultMaxParallelism, execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetOverwriteCache())
+ assert.Equal(t, defaultK8sServiceAccount, execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
assert.Nil(t, execConfig.GetRawOutputDataConfig())
assert.Nil(t, execConfig.GetLabels())
assert.Nil(t, execConfig.GetAnnotations())
@@ -5316,13 +5316,13 @@ func TestGetExecutionConfigOverrides(t *testing.T) {
t.Run("test pick up security context from admin system config", func(t *testing.T) {
executionManager.config.ApplicationConfiguration().GetTopLevelConfig().K8SServiceAccount = "flyte-test"
request := &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil)
assert.NoError(t, err)
- assert.Equal(t, "flyte-test", execConfig.SecurityContext.RunAs.K8SServiceAccount)
+ assert.Equal(t, "flyte-test", execConfig.GetSecurityContext().GetRunAs().GetK8SServiceAccount())
executionManager.config.ApplicationConfiguration().GetTopLevelConfig().K8SServiceAccount = defaultK8sServiceAccount
})
})
@@ -5333,10 +5333,10 @@ func TestGetExecutionConfig(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.Contains(t, []managerInterfaces.ResourceRequest{{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG,
- }, {Project: workflowIdentifier.Project,
+ }, {Project: workflowIdentifier.GetProject(),
Domain: "",
ResourceType: admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG},
}, request)
@@ -5358,13 +5358,13 @@ func TestGetExecutionConfig(t *testing.T) {
config: applicationConfig,
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}, nil)
assert.NoError(t, err)
- assert.Equal(t, execConfig.MaxParallelism, int32(100))
- assert.True(t, execConfig.OverwriteCache)
+ assert.Equal(t, execConfig.GetMaxParallelism(), int32(100))
+ assert.True(t, execConfig.GetOverwriteCache())
}
func TestGetExecutionConfig_Spec(t *testing.T) {
@@ -5379,8 +5379,8 @@ func TestGetExecutionConfig_Spec(t *testing.T) {
config: applicationConfig,
}
execConfig, err := executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
MaxParallelism: 100,
OverwriteCache: true,
@@ -5392,12 +5392,12 @@ func TestGetExecutionConfig_Spec(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Equal(t, int32(100), execConfig.MaxParallelism)
- assert.True(t, execConfig.OverwriteCache)
+ assert.Equal(t, int32(100), execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetOverwriteCache())
execConfig, err = executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}, &admin.LaunchPlan{
Spec: &admin.LaunchPlanSpec{
@@ -5406,8 +5406,8 @@ func TestGetExecutionConfig_Spec(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Equal(t, int32(50), execConfig.MaxParallelism)
- assert.True(t, execConfig.OverwriteCache)
+ assert.Equal(t, int32(50), execConfig.GetMaxParallelism())
+ assert.True(t, execConfig.GetOverwriteCache())
resourceManager = managerMocks.MockResourceManager{}
resourceManager.GetResourceFunc = func(ctx context.Context,
@@ -5422,15 +5422,15 @@ func TestGetExecutionConfig_Spec(t *testing.T) {
executionManager.config.ApplicationConfiguration().GetTopLevelConfig().OverwriteCache = true
execConfig, err = executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
}, &admin.LaunchPlan{
Spec: &admin.LaunchPlanSpec{},
})
assert.NoError(t, err)
- assert.Equal(t, execConfig.MaxParallelism, int32(25))
- assert.True(t, execConfig.OverwriteCache)
+ assert.Equal(t, execConfig.GetMaxParallelism(), int32(25))
+ assert.True(t, execConfig.GetOverwriteCache())
}
func TestGetClusterAssignment(t *testing.T) {
@@ -5439,8 +5439,8 @@ func TestGetClusterAssignment(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.EqualValues(t, request, managerInterfaces.ResourceRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_CLUSTER_ASSIGNMENT,
})
return &managerInterfaces.ResourceResponse{
@@ -5457,8 +5457,8 @@ func TestGetClusterAssignment(t *testing.T) {
}
t.Run("value from db", func(t *testing.T) {
ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
})
assert.NoError(t, err)
@@ -5481,8 +5481,8 @@ func TestGetClusterAssignment(t *testing.T) {
}
ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
})
assert.NoError(t, err)
@@ -5491,8 +5491,8 @@ func TestGetClusterAssignment(t *testing.T) {
t.Run("value from request matches value from config", func(t *testing.T) {
reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"}
ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
ClusterAssignment: &reqClusterAssignment,
},
@@ -5510,8 +5510,8 @@ func TestGetClusterAssignment(t *testing.T) {
reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"}
ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
ClusterAssignment: &reqClusterAssignment,
},
@@ -5536,8 +5536,8 @@ func TestGetClusterAssignment(t *testing.T) {
reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"}
ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
ClusterAssignment: &reqClusterAssignment,
},
@@ -5548,8 +5548,8 @@ func TestGetClusterAssignment(t *testing.T) {
t.Run("value from request doesn't match value from config", func(t *testing.T) {
reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "swimming-pool"}
_, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{
ClusterAssignment: &reqClusterAssignment,
},
@@ -5564,8 +5564,8 @@ func TestGetClusterAssignment(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.EqualValues(t, request, managerInterfaces.ResourceRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
ResourceType: admin.MatchableResource_CLUSTER_ASSIGNMENT,
})
return &managerInterfaces.ResourceResponse{
@@ -5578,8 +5578,8 @@ func TestGetClusterAssignment(t *testing.T) {
}
_, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
Spec: &admin.ExecutionSpec{},
})
@@ -5622,8 +5622,8 @@ func TestResolvePermissions(t *testing.T) {
}
authRole := resolveAuthRole(execRequest, lp)
sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole)
- assert.Equal(t, assumableIamRole, authRole.AssumableIamRole)
- assert.Equal(t, k8sServiceAccount, authRole.KubernetesServiceAccount)
+ assert.Equal(t, assumableIamRole, authRole.GetAssumableIamRole())
+ assert.Equal(t, k8sServiceAccount, authRole.GetKubernetesServiceAccount())
assert.Equal(t, &core.SecurityContext{
RunAs: &core.Identity{
IamRole: assumableIamRole,
@@ -5659,10 +5659,10 @@ func TestResolvePermissions(t *testing.T) {
},
}
sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole)
- assert.Equal(t, "", authRole.AssumableIamRole)
- assert.Equal(t, "", authRole.KubernetesServiceAccount)
- assert.Equal(t, assumableIamRoleSc, sc.RunAs.IamRole)
- assert.Equal(t, k8sServiceAccountSc, sc.RunAs.K8SServiceAccount)
+ assert.Equal(t, "", authRole.GetAssumableIamRole())
+ assert.Equal(t, "", authRole.GetKubernetesServiceAccount())
+ assert.Equal(t, assumableIamRoleSc, sc.GetRunAs().GetIamRole())
+ assert.Equal(t, k8sServiceAccountSc, sc.GetRunAs().GetK8SServiceAccount())
})
t.Run("prefer lp auth role over auth", func(t *testing.T) {
execRequest := &admin.ExecutionCreateRequest{
@@ -5685,8 +5685,8 @@ func TestResolvePermissions(t *testing.T) {
RunAs: &core.Identity{},
}
sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole)
- assert.Equal(t, assumableIamRole, authRole.AssumableIamRole)
- assert.Equal(t, k8sServiceAccount, authRole.KubernetesServiceAccount)
+ assert.Equal(t, assumableIamRole, authRole.GetAssumableIamRole())
+ assert.Equal(t, k8sServiceAccount, authRole.GetKubernetesServiceAccount())
assert.Equal(t, &core.SecurityContext{
RunAs: &core.Identity{
IamRole: assumableIamRole,
@@ -5731,10 +5731,10 @@ func TestResolvePermissions(t *testing.T) {
},
}
sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole)
- assert.Equal(t, assumableIamRole, authRole.AssumableIamRole)
- assert.Equal(t, k8sServiceAccount, authRole.KubernetesServiceAccount)
- assert.Equal(t, assumableIamRoleSc, sc.RunAs.IamRole)
- assert.Equal(t, k8sServiceAccountSc, sc.RunAs.K8SServiceAccount)
+ assert.Equal(t, assumableIamRole, authRole.GetAssumableIamRole())
+ assert.Equal(t, k8sServiceAccount, authRole.GetKubernetesServiceAccount())
+ assert.Equal(t, assumableIamRoleSc, sc.GetRunAs().GetIamRole())
+ assert.Equal(t, k8sServiceAccountSc, sc.GetRunAs().GetK8SServiceAccount())
})
t.Run("prefer lp auth over role", func(t *testing.T) {
execRequest := &admin.ExecutionCreateRequest{
@@ -5757,8 +5757,8 @@ func TestResolvePermissions(t *testing.T) {
},
}
sc := resolveSecurityCtx(context.TODO(), execConfigSecCtx, authRole)
- assert.Equal(t, assumableIamRole, authRole.AssumableIamRole)
- assert.Equal(t, k8sServiceAccount, authRole.KubernetesServiceAccount)
+ assert.Equal(t, assumableIamRole, authRole.GetAssumableIamRole())
+ assert.Equal(t, k8sServiceAccount, authRole.GetKubernetesServiceAccount())
assert.Equal(t, &core.SecurityContext{
RunAs: &core.Identity{
IamRole: assumableIamRole,
@@ -5778,8 +5778,8 @@ func TestResolvePermissions(t *testing.T) {
Role: "old role",
},
})
- assert.Equal(t, assumableIamRoleLp, authRole.AssumableIamRole)
- assert.Equal(t, k8sServiceAccountLp, authRole.KubernetesServiceAccount)
+ assert.Equal(t, assumableIamRoleLp, authRole.GetAssumableIamRole())
+ assert.Equal(t, k8sServiceAccountLp, authRole.GetKubernetesServiceAccount())
})
}
@@ -5859,7 +5859,7 @@ func TestQueryTemplate(t *testing.T) {
},
}
- filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.Literals)
+ filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.GetLiterals())
assert.NoError(t, err)
assert.True(t, proto.Equal(q, filledQuery))
})
@@ -5881,11 +5881,11 @@ func TestQueryTemplate(t *testing.T) {
},
}
- filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.Literals)
+ filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.GetLiterals())
assert.NoError(t, err)
- staticTime := filledQuery.GetArtifactId().Partitions.Value["partition1"].GetStaticValue()
+ staticTime := filledQuery.GetArtifactId().GetPartitions().GetValue()["partition1"].GetStaticValue()
assert.Equal(t, "2063-04-05", staticTime)
- assert.Equal(t, int64(2942956800), filledQuery.GetArtifactId().TimePartition.Value.GetTimeValue().Seconds)
+ assert.Equal(t, int64(2942956800), filledQuery.GetArtifactId().GetTimePartition().GetValue().GetTimeValue().GetSeconds())
})
t.Run("something missing", func(t *testing.T) {
@@ -5905,7 +5905,7 @@ func TestQueryTemplate(t *testing.T) {
},
}
- _, err := m.fillInTemplateArgs(ctx, q, otherInputs.Literals)
+ _, err := m.fillInTemplateArgs(ctx, q, otherInputs.GetLiterals())
assert.Error(t, err)
})
}
diff --git a/flyteadmin/pkg/manager/impl/executions/quality_of_service.go b/flyteadmin/pkg/manager/impl/executions/quality_of_service.go
index a96d99d3d6..c2b6f8d3da 100644
--- a/flyteadmin/pkg/manager/impl/executions/quality_of_service.go
+++ b/flyteadmin/pkg/manager/impl/executions/quality_of_service.go
@@ -37,9 +37,9 @@ type qualityOfServiceAllocator struct {
func (q qualityOfServiceAllocator) getQualityOfServiceFromDb(ctx context.Context, workflowIdentifier *core.Identifier) (
*core.QualityOfService, error) {
resource, err := q.resourceManager.GetResource(ctx, interfaces.ResourceRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
- Workflow: workflowIdentifier.Name,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
+ Workflow: workflowIdentifier.GetName(),
ResourceType: admin.MatchableResource_QUALITY_OF_SERVICE_SPECIFICATION,
})
if err != nil {
@@ -79,63 +79,62 @@ QualityOfService spec to apply.
This method handles resolving the QualityOfService for an execution given the above rules.
*/
func (q qualityOfServiceAllocator) GetQualityOfService(ctx context.Context, input GetQualityOfServiceInput) (QualityOfServiceSpec, error) {
- workflowIdentifier := input.Workflow.Id
+ workflowIdentifier := input.Workflow.GetId()
var qualityOfServiceTier core.QualityOfService_Tier
- if input.ExecutionCreateRequest.Spec.QualityOfService != nil {
- if input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec() != nil {
+ if input.ExecutionCreateRequest.GetSpec().GetQualityOfService() != nil {
+ if input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec() != nil {
logger.Debugf(ctx, "Determining quality of service from execution spec for [%s/%s/%s]",
- input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
- input.ExecutionCreateRequest.Name)
- duration, err := ptypes.Duration(input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget)
+ input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+ input.ExecutionCreateRequest.GetName())
+ duration, err := ptypes.Duration(input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget())
if err != nil {
return QualityOfServiceSpec{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"Invalid custom quality of service set in create execution request [%s/%s/%s], failed to parse duration [%v] with: %v",
- input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
- input.ExecutionCreateRequest.Name,
- input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget, err)
+ input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+ input.ExecutionCreateRequest.GetName(),
+ input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget(), err)
}
return QualityOfServiceSpec{
QueuingBudget: duration,
}, nil
}
- qualityOfServiceTier = input.ExecutionCreateRequest.Spec.QualityOfService.GetTier()
- } else if input.LaunchPlan.Spec.QualityOfService != nil {
- if input.LaunchPlan.Spec.QualityOfService.GetSpec() != nil {
+ qualityOfServiceTier = input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetTier()
+ } else if input.LaunchPlan.GetSpec().GetQualityOfService() != nil {
+ if input.LaunchPlan.GetSpec().GetQualityOfService().GetSpec() != nil {
logger.Debugf(ctx, "Determining quality of service from launch plan spec for [%s/%s/%s]",
- input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
- input.ExecutionCreateRequest.Name)
- duration, err := ptypes.Duration(input.LaunchPlan.Spec.QualityOfService.GetSpec().QueueingBudget)
+ input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+ input.ExecutionCreateRequest.GetName())
+ duration, err := ptypes.Duration(input.LaunchPlan.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget())
if err != nil {
return QualityOfServiceSpec{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"Invalid custom quality of service set in launch plan [%v], failed to parse duration [%v] with: %v",
- input.LaunchPlan.Id,
- input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget, err)
+ input.LaunchPlan.GetId(),
+ input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget(), err)
}
return QualityOfServiceSpec{
QueuingBudget: duration,
}, nil
}
- qualityOfServiceTier = input.LaunchPlan.Spec.QualityOfService.GetTier()
- } else if input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata != nil &&
- input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService != nil {
+ qualityOfServiceTier = input.LaunchPlan.GetSpec().GetQualityOfService().GetTier()
+ } else if input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata() != nil &&
+ input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService() != nil {
logger.Debugf(ctx, "Determining quality of service from workflow spec for [%s/%s/%s]",
- input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
- input.ExecutionCreateRequest.Name)
- if input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService.GetSpec() != nil {
- duration, err := ptypes.Duration(input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService.
- GetSpec().QueueingBudget)
+ input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+ input.ExecutionCreateRequest.GetName())
+ if input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService().GetSpec() != nil {
+ duration, err := ptypes.Duration(input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService().GetSpec().GetQueueingBudget())
if err != nil {
return QualityOfServiceSpec{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"Invalid custom quality of service set in workflow [%v], failed to parse duration [%v] with: %v",
workflowIdentifier,
- input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget, err)
+ input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget(), err)
}
return QualityOfServiceSpec{
QueuingBudget: duration,
}, nil
}
- qualityOfServiceTier = input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService.GetTier()
+ qualityOfServiceTier = input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService().GetTier()
}
// If nothing in the hierarchy of registrable entities has set the quality of service,
@@ -147,23 +146,23 @@ func (q qualityOfServiceAllocator) GetQualityOfService(ctx context.Context, inpu
}
if qualityOfService != nil && qualityOfService.GetSpec() != nil {
logger.Debugf(ctx, "Determining quality of service from spec database override for [%s/%s/%s]",
- input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
- input.ExecutionCreateRequest.Name)
- duration, err := ptypes.Duration(qualityOfService.GetSpec().QueueingBudget)
+ input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+ input.ExecutionCreateRequest.GetName())
+ duration, err := ptypes.Duration(qualityOfService.GetSpec().GetQueueingBudget())
if err != nil {
return QualityOfServiceSpec{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"Invalid custom quality of service set in overridable matching attributes for [%v],"+
"failed to parse duration [%v] with: %v", workflowIdentifier,
- input.ExecutionCreateRequest.Spec.QualityOfService.GetSpec().QueueingBudget, err)
+ input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec().GetQueueingBudget(), err)
}
return QualityOfServiceSpec{
QueuingBudget: duration,
}, nil
} else if qualityOfService != nil && qualityOfService.GetTier() != core.QualityOfService_UNDEFINED {
logger.Debugf(ctx, "Determining quality of service tier from database override for [%s/%s/%s]",
- input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
- input.ExecutionCreateRequest.Name)
- qualityOfServiceTier = input.Workflow.Closure.CompiledWorkflow.Primary.Template.Metadata.QualityOfService.GetTier()
+ input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+ input.ExecutionCreateRequest.GetName())
+ qualityOfServiceTier = input.Workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetMetadata().GetQualityOfService().GetTier()
}
}
@@ -171,10 +170,10 @@ func (q qualityOfServiceAllocator) GetQualityOfService(ctx context.Context, inpu
// set, use the default values from the admin application config.
if qualityOfServiceTier == core.QualityOfService_UNDEFINED {
logger.Debugf(ctx, "Determining quality of service tier from application config override for [%s/%s/%s]",
- input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
- input.ExecutionCreateRequest.Name)
+ input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+ input.ExecutionCreateRequest.GetName())
var ok bool
- qualityOfServiceTier, ok = q.config.QualityOfServiceConfiguration().GetDefaultTiers()[input.ExecutionCreateRequest.Domain]
+ qualityOfServiceTier, ok = q.config.QualityOfServiceConfiguration().GetDefaultTiers()[input.ExecutionCreateRequest.GetDomain()]
if !ok {
// No queueing budget to set when no default is specified
return QualityOfServiceSpec{}, nil
@@ -186,10 +185,10 @@ func (q qualityOfServiceAllocator) GetQualityOfService(ctx context.Context, inpu
return QualityOfServiceSpec{}, nil
}
logger.Debugf(ctx, "Determining quality of service spec from application config override for [%s/%s/%s] with tier [%v]",
- input.ExecutionCreateRequest.Project, input.ExecutionCreateRequest.Domain,
- input.ExecutionCreateRequest.Name, qualityOfServiceTier)
+ input.ExecutionCreateRequest.GetProject(), input.ExecutionCreateRequest.GetDomain(),
+ input.ExecutionCreateRequest.GetName(), qualityOfServiceTier)
// Config values should always be vetted so there's no need to check the error from conversion.
- duration, _ := ptypes.Duration(executionValues.QueueingBudget)
+ duration, _ := ptypes.Duration(executionValues.GetQueueingBudget())
return QualityOfServiceSpec{
QueuingBudget: duration,
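
The replacements above lean on the nil-safety of protoc-gen-go getters: a generated GetX() checks its receiver for nil and returns the field's zero value, so a chain like input.ExecutionCreateRequest.GetSpec().GetQualityOfService().GetSpec() cannot panic even when an intermediate message is unset, which direct field access can. A minimal sketch of that pattern, using hypothetical hand-written stand-ins rather than the real flyteidl messages:

package main

import "fmt"

// Spec is a stand-in for a generated protobuf message (hypothetical, not flyteidl).
type Spec struct {
	QueueingBudget string
}

// GetQueueingBudget mirrors the shape protoc-gen-go emits: safe to call on a nil receiver.
func (s *Spec) GetQueueingBudget() string {
	if s != nil {
		return s.QueueingBudget
	}
	return ""
}

// Request wraps Spec one level up, again with a nil-safe getter.
type Request struct {
	Spec *Spec
}

func (r *Request) GetSpec() *Spec {
	if r != nil {
		return r.Spec
	}
	return nil
}

func main() {
	var r *Request // entirely unset
	// Direct field access (r.Spec.QueueingBudget) would panic here; the getter chain does not.
	fmt.Printf("budget: %q\n", r.GetSpec().GetQueueingBudget()) // budget: ""
}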
diff --git a/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go b/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go
index 41a04ec2bc..0ad76cd3c7 100644
--- a/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go
+++ b/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go
@@ -63,9 +63,9 @@ func addGetResourceFunc(t *testing.T, resourceManager interfaces.ResourceInterfa
resourceManager.(*managerMocks.MockResourceManager).GetResourceFunc = func(ctx context.Context,
request interfaces.ResourceRequest) (*interfaces.ResourceResponse, error) {
assert.EqualValues(t, request, interfaces.ResourceRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
- Workflow: workflowIdentifier.Name,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
+ Workflow: workflowIdentifier.GetName(),
ResourceType: admin.MatchableResource_QUALITY_OF_SERVICE_SPECIFICATION,
})
return &interfaces.ResourceResponse{
diff --git a/flyteadmin/pkg/manager/impl/executions/queues.go b/flyteadmin/pkg/manager/impl/executions/queues.go
index 90a5951a33..2064626717 100644
--- a/flyteadmin/pkg/manager/impl/executions/queues.go
+++ b/flyteadmin/pkg/manager/impl/executions/queues.go
@@ -59,9 +59,9 @@ func (q *queueAllocatorImpl) GetQueue(ctx context.Context, identifier *core.Iden
q.refreshExecutionQueues(executionQueues)
resource, err := q.resourceManager.GetResource(ctx, interfaces.ResourceRequest{
- Project: identifier.Project,
- Domain: identifier.Domain,
- Workflow: identifier.Name,
+ Project: identifier.GetProject(),
+ Domain: identifier.GetDomain(),
+ Workflow: identifier.GetName(),
ResourceType: admin.MatchableResource_EXECUTION_QUEUE,
})
@@ -71,7 +71,7 @@ func (q *queueAllocatorImpl) GetQueue(ctx context.Context, identifier *core.Iden
}
if resource != nil && resource.Attributes != nil && resource.Attributes.GetExecutionQueueAttributes() != nil {
- for _, tag := range resource.Attributes.GetExecutionQueueAttributes().Tags {
+ for _, tag := range resource.Attributes.GetExecutionQueueAttributes().GetTags() {
matches, ok := q.queueConfigMap[tag]
if !ok {
continue
@@ -84,7 +84,7 @@ func (q *queueAllocatorImpl) GetQueue(ctx context.Context, identifier *core.Iden
var defaultTags []string
// If we've made it this far, check to see if a domain-specific default workflow config exists for this particular domain.
for _, workflowConfig := range q.config.QueueConfiguration().GetWorkflowConfigs() {
- if workflowConfig.Domain == identifier.Domain {
+ if workflowConfig.Domain == identifier.GetDomain() {
tags = workflowConfig.Tags
} else if len(workflowConfig.Domain) == 0 {
defaultTags = workflowConfig.Tags
diff --git a/flyteadmin/pkg/manager/impl/launch_plan_manager.go b/flyteadmin/pkg/manager/impl/launch_plan_manager.go
index 74f0571f86..b1d0d8d56d 100644
--- a/flyteadmin/pkg/manager/impl/launch_plan_manager.go
+++ b/flyteadmin/pkg/manager/impl/launch_plan_manager.go
@@ -41,13 +41,13 @@ type LaunchPlanManager struct {
}
func getLaunchPlanContext(ctx context.Context, identifier *core.Identifier) context.Context {
- ctx = contextutils.WithProjectDomain(ctx, identifier.Project, identifier.Domain)
- return contextutils.WithLaunchPlanID(ctx, identifier.Name)
+ ctx = contextutils.WithProjectDomain(ctx, identifier.GetProject(), identifier.GetDomain())
+ return contextutils.WithLaunchPlanID(ctx, identifier.GetName())
}
func (m *LaunchPlanManager) getNamedEntityContext(ctx context.Context, identifier *admin.NamedEntityIdentifier) context.Context {
- ctx = contextutils.WithProjectDomain(ctx, identifier.Project, identifier.Domain)
- return contextutils.WithLaunchPlanID(ctx, identifier.Name)
+ ctx = contextutils.WithProjectDomain(ctx, identifier.GetProject(), identifier.GetDomain())
+ return contextutils.WithLaunchPlanID(ctx, identifier.GetName())
}
func (m *LaunchPlanManager) CreateLaunchPlan(
@@ -57,35 +57,35 @@ func (m *LaunchPlanManager) CreateLaunchPlan(
logger.Debugf(ctx, "Failed to validate provided workflow ID for CreateLaunchPlan with err: %v", err)
return nil, err
}
- workflowModel, err := util.GetWorkflowModel(ctx, m.db, request.Spec.WorkflowId)
+ workflowModel, err := util.GetWorkflowModel(ctx, m.db, request.GetSpec().GetWorkflowId())
if err != nil {
logger.Debugf(ctx, "Failed to get workflow with id [%+v] for CreateLaunchPlan with id [%+v] with err %v",
- request.Spec.WorkflowId, request.Id)
+ request.GetSpec().GetWorkflowId(), request.GetId(), err)
return nil, err
}
var workflowInterface core.TypedInterface
- if workflowModel.TypedInterface != nil && len(workflowModel.TypedInterface) > 0 {
+ if len(workflowModel.TypedInterface) > 0 {
err = proto.Unmarshal(workflowModel.TypedInterface, &workflowInterface)
if err != nil {
logger.Errorf(ctx,
"Failed to unmarshal TypedInterface for workflow [%+v] with err: %v",
- request.Spec.WorkflowId, err)
+ request.GetSpec().GetWorkflowId(), err)
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal workflow inputs")
}
}
if err := validation.ValidateLaunchPlan(ctx, request, m.db, m.config.ApplicationConfiguration(), &workflowInterface); err != nil {
- logger.Debugf(ctx, "could not create launch plan: %+v, request failed validation with err: %v", request.Id, err)
+ logger.Debugf(ctx, "could not create launch plan: %+v, request failed validation with err: %v", request.GetId(), err)
return nil, err
}
- ctx = getLaunchPlanContext(ctx, request.Id)
- launchPlan := transformers.CreateLaunchPlan(request, workflowInterface.Outputs)
+ ctx = getLaunchPlanContext(ctx, request.GetId())
+ launchPlan := transformers.CreateLaunchPlan(request, workflowInterface.GetOutputs())
launchPlanDigest, err := util.GetLaunchPlanDigest(ctx, launchPlan)
if err != nil {
- logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.Id, err)
+ logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.GetId(), err)
return nil, err
}
- existingLaunchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Id)
+ existingLaunchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.GetId())
if err == nil {
if bytes.Equal(existingLaunchPlanModel.Digest, launchPlanDigest) {
return nil, errors.NewLaunchPlanExistsIdenticalStructureError(ctx, request)
@@ -96,7 +96,7 @@ func (m *LaunchPlanManager) CreateLaunchPlan(
return nil, transformerErr
}
// A launch plan exists with different structure
- return nil, errors.NewLaunchPlanExistsDifferentStructureError(ctx, request, existingLaunchPlan.Spec, launchPlan.Spec)
+ return nil, errors.NewLaunchPlanExistsDifferentStructureError(ctx, request, existingLaunchPlan.GetSpec(), launchPlan.GetSpec())
}
launchPlanModel, err :=
@@ -104,12 +104,12 @@ func (m *LaunchPlanManager) CreateLaunchPlan(
if err != nil {
logger.Errorf(ctx,
"Failed to transform launch plan model [%+v], and workflow outputs [%+v] with err: %v",
- request, workflowInterface.Outputs, err)
+ request, workflowInterface.GetOutputs(), err)
return nil, err
}
err = m.db.LaunchPlanRepo().Create(ctx, launchPlanModel)
if err != nil {
- logger.Errorf(ctx, "Failed to save launch plan model %+v with err: %v", request.Id, err)
+ logger.Errorf(ctx, "Failed to save launch plan model %+v with err: %v", request.GetId(), err)
return nil, err
}
m.metrics.SpecSizeBytes.Observe(float64(len(launchPlanModel.Spec)))
@@ -143,7 +143,7 @@ func isScheduleEmpty(launchPlanSpec *admin.LaunchPlanSpec) bool {
if schedule == nil {
return true
}
- if schedule.GetCronSchedule() != nil && len(schedule.GetCronSchedule().Schedule) != 0 {
+ if schedule.GetCronSchedule() != nil && len(schedule.GetCronSchedule().GetSchedule()) != 0 {
return false
}
if len(schedule.GetCronExpression()) != 0 {
@@ -160,7 +160,7 @@ func (m *LaunchPlanManager) enableSchedule(ctx context.Context, launchPlanIdenti
addScheduleInput, err := m.scheduler.CreateScheduleInput(ctx,
m.config.ApplicationConfiguration().GetSchedulerConfig(), launchPlanIdentifier,
- launchPlanSpec.EntityMetadata.Schedule)
+ launchPlanSpec.GetEntityMetadata().GetSchedule())
if err != nil {
return err
}
@@ -223,30 +223,30 @@ func (m *LaunchPlanManager) updateSchedules(
func (m *LaunchPlanManager) disableLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) (
*admin.LaunchPlanUpdateResponse, error) {
- if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil {
- logger.Debugf(ctx, "can't disable launch plan [%+v] with invalid identifier: %v", request.Id, err)
+ if err := validation.ValidateIdentifier(request.GetId(), common.LaunchPlan); err != nil {
+ logger.Debugf(ctx, "can't disable launch plan [%+v] with invalid identifier: %v", request.GetId(), err)
return nil, err
}
- launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Id)
+ launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.GetId())
if err != nil {
- logger.Debugf(ctx, "couldn't find launch plan [%+v] to disable with err: %v", request.Id, err)
+ logger.Debugf(ctx, "couldn't find launch plan [%+v] to disable with err: %v", request.GetId(), err)
return nil, err
}
err = m.updateLaunchPlanModelState(&launchPlanModel, admin.LaunchPlanState_INACTIVE)
if err != nil {
- logger.Debugf(ctx, "failed to disable launch plan [%+v] with err: %v", request.Id, err)
+ logger.Debugf(ctx, "failed to disable launch plan [%+v] with err: %v", request.GetId(), err)
return nil, err
}
var launchPlanSpec admin.LaunchPlanSpec
err = proto.Unmarshal(launchPlanModel.Spec, &launchPlanSpec)
if err != nil {
- logger.Errorf(ctx, "failed to unmarshal launch plan spec when disabling schedule for %+v", request.Id)
+ logger.Errorf(ctx, "failed to unmarshal launch plan spec when disabling schedule for %+v", request.GetId())
return nil, errors.NewFlyteAdminErrorf(codes.Internal,
- "failed to unmarshal launch plan spec when disabling schedule for %+v", request.Id)
+ "failed to unmarshal launch plan spec when disabling schedule for %+v", request.GetId())
}
- if launchPlanSpec.EntityMetadata != nil && launchPlanSpec.EntityMetadata.Schedule != nil {
+ if launchPlanSpec.GetEntityMetadata() != nil && launchPlanSpec.GetEntityMetadata().GetSchedule() != nil {
err = m.disableSchedule(ctx, &core.Identifier{
Project: launchPlanModel.Project,
Domain: launchPlanModel.Domain,
@@ -259,23 +259,23 @@ func (m *LaunchPlanManager) disableLaunchPlan(ctx context.Context, request *admi
}
err = m.db.LaunchPlanRepo().Update(ctx, launchPlanModel)
if err != nil {
- logger.Debugf(ctx, "Failed to update launchPlanModel with ID [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to update launchPlanModel with ID [%+v] with err %v", request.GetId(), err)
return nil, err
}
- logger.Debugf(ctx, "disabled launch plan: [%+v]", request.Id)
+ logger.Debugf(ctx, "disabled launch plan: [%+v]", request.GetId())
return &admin.LaunchPlanUpdateResponse{}, nil
}
func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) (
*admin.LaunchPlanUpdateResponse, error) {
newlyActiveLaunchPlanModel, err := m.db.LaunchPlanRepo().Get(ctx, repoInterfaces.Identifier{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
- Version: request.Id.Version,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
+ Version: request.GetId().GetVersion(),
})
if err != nil {
- logger.Debugf(ctx, "Failed to find launch plan to enable with id [%+v] and err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to find launch plan to enable with id [%+v] and err %v", request.GetId(), err)
return nil, err
}
// Set desired launch plan version to active:
@@ -298,13 +298,12 @@ func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin
// Not found is fine, there isn't always a guaranteed active launch plan model.
if err.(errors.FlyteAdminError).Code() != codes.NotFound {
logger.Infof(ctx, "Failed to search for an active launch plan model with project: %s, domain: %s, name: %s and err %v",
- request.Id.Project, request.Id.Domain, request.Id.Name, err)
+ request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName(), err)
return nil, err
}
logger.Debugf(ctx, "No active launch plan model found to disable with project: %s, domain: %s, name: %s",
- request.Id.Project, request.Id.Domain, request.Id.Name)
- } else if formerlyActiveLaunchPlanModelOutput.LaunchPlans != nil &&
- len(formerlyActiveLaunchPlanModelOutput.LaunchPlans) > 0 {
+ request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName())
+ } else if len(formerlyActiveLaunchPlanModelOutput.LaunchPlans) > 0 {
formerlyActiveLaunchPlanModel = &formerlyActiveLaunchPlanModelOutput.LaunchPlans[0]
err = m.updateLaunchPlanModelState(formerlyActiveLaunchPlanModel, admin.LaunchPlanState_INACTIVE)
if err != nil {
@@ -322,7 +321,7 @@ func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin
err = m.db.LaunchPlanRepo().SetActive(ctx, newlyActiveLaunchPlanModel, formerlyActiveLaunchPlanModel)
if err != nil {
logger.Debugf(ctx,
- "Failed to set launchPlanModel with ID [%+v] to active with err %v", request.Id, err)
+ "Failed to set launchPlanModel with ID [%+v] to active with err %v", request.GetId(), err)
return nil, err
}
return &admin.LaunchPlanUpdateResponse{}, nil
@@ -331,11 +330,11 @@ func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin
func (m *LaunchPlanManager) UpdateLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) (
*admin.LaunchPlanUpdateResponse, error) {
- if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil {
- logger.Debugf(ctx, "can't update launch plan [%+v] state, invalid identifier: %v", request.Id, err)
+ if err := validation.ValidateIdentifier(request.GetId(), common.LaunchPlan); err != nil {
+ logger.Debugf(ctx, "can't update launch plan [%+v] state, invalid identifier: %v", request.GetId(), err)
}
- ctx = getLaunchPlanContext(ctx, request.Id)
- switch request.State {
+ ctx = getLaunchPlanContext(ctx, request.GetId())
+ switch request.GetState() {
case admin.LaunchPlanState_INACTIVE:
return m.disableLaunchPlan(ctx, request)
case admin.LaunchPlanState_ACTIVE:
@@ -343,29 +342,29 @@ func (m *LaunchPlanManager) UpdateLaunchPlan(ctx context.Context, request *admin
default:
return nil, errors.NewFlyteAdminErrorf(
codes.InvalidArgument, "Unrecognized launch plan state %v for update for launch plan [%+v]",
- request.State, request.Id)
+ request.GetState(), request.GetId())
}
}
func (m *LaunchPlanManager) GetLaunchPlan(ctx context.Context, request *admin.ObjectGetRequest) (
*admin.LaunchPlan, error) {
- if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil {
- logger.Debugf(ctx, "can't get launch plan [%+v] with invalid identifier: %v", request.Id, err)
+ if err := validation.ValidateIdentifier(request.GetId(), common.LaunchPlan); err != nil {
+ logger.Debugf(ctx, "can't get launch plan [%+v] with invalid identifier: %v", request.GetId(), err)
return nil, err
}
- ctx = getLaunchPlanContext(ctx, request.Id)
- return util.GetLaunchPlan(ctx, m.db, request.Id)
+ ctx = getLaunchPlanContext(ctx, request.GetId())
+ return util.GetLaunchPlan(ctx, m.db, request.GetId())
}
func (m *LaunchPlanManager) GetActiveLaunchPlan(ctx context.Context, request *admin.ActiveLaunchPlanRequest) (
*admin.LaunchPlan, error) {
if err := validation.ValidateActiveLaunchPlanRequest(request); err != nil {
- logger.Debugf(ctx, "can't get active launch plan [%+v] with invalid request: %v", request.Id, err)
+ logger.Debugf(ctx, "can't get active launch plan [%+v] with invalid request: %v", request.GetId(), err)
return nil, err
}
- ctx = m.getNamedEntityContext(ctx, request.Id)
+ ctx = m.getNamedEntityContext(ctx, request.GetId())
- filters, err := util.GetActiveLaunchPlanVersionFilters(request.Id.Project, request.Id.Domain, request.Id.Name)
+ filters, err := util.GetActiveLaunchPlanVersionFilters(request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName())
if err != nil {
return nil, err
}
@@ -383,7 +382,7 @@ func (m *LaunchPlanManager) GetActiveLaunchPlan(ctx context.Context, request *ad
}
if len(output.LaunchPlans) != 1 {
- return nil, errors.NewFlyteAdminErrorf(codes.NotFound, "No active launch plan could be found: %s:%s:%s", request.Id.Project, request.Id.Domain, request.Id.Name)
+ return nil, errors.NewFlyteAdminErrorf(codes.NotFound, "No active launch plan could be found: %s:%s:%s", request.GetId().GetProject(), request.GetId().GetDomain(), request.GetId().GetName())
}
return transformers.FromLaunchPlanModel(output.LaunchPlans[0])
@@ -397,30 +396,30 @@ func (m *LaunchPlanManager) ListLaunchPlans(ctx context.Context, request *admin.
logger.Debugf(ctx, "")
return nil, err
}
- ctx = m.getNamedEntityContext(ctx, request.Id)
+ ctx = m.getNamedEntityContext(ctx, request.GetId())
filters, err := util.GetDbFilters(util.FilterSpec{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
- RequestFilters: request.Filters,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
+ RequestFilters: request.GetFilters(),
}, common.LaunchPlan)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.LaunchPlanColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.LaunchPlanColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListLaunchPlans", request.Token)
+ "invalid pagination token %s for ListLaunchPlans", request.GetToken())
}
listLaunchPlansInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
@@ -438,7 +437,7 @@ func (m *LaunchPlanManager) ListLaunchPlans(ctx context.Context, request *admin.
return nil, err
}
var token string
- if len(output.LaunchPlans) == int(request.Limit) {
+ if len(output.LaunchPlans) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(output.LaunchPlans))
}
return &admin.LaunchPlanList{
@@ -455,25 +454,25 @@ func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request *
logger.Debugf(ctx, "")
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain())
- filters, err := util.ListActiveLaunchPlanVersionsFilters(request.Project, request.Domain)
+ filters, err := util.ListActiveLaunchPlanVersionsFilters(request.GetProject(), request.GetDomain())
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.LaunchPlanColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.LaunchPlanColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListActiveLaunchPlans", request.Token)
+ "invalid pagination token %s for ListActiveLaunchPlans", request.GetToken())
}
listLaunchPlansInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
@@ -491,7 +490,7 @@ func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request *
return nil, err
}
var token string
- if len(output.LaunchPlans) == int(request.Limit) {
+ if len(output.LaunchPlans) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(output.LaunchPlans))
}
return &admin.LaunchPlanList{
@@ -503,26 +502,26 @@ func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request *
// At least project name and domain must be specified along with limit.
func (m *LaunchPlanManager) ListLaunchPlanIds(ctx context.Context, request *admin.NamedEntityIdentifierListRequest) (
*admin.NamedEntityIdentifierList, error) {
- ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain())
filters, err := util.GetDbFilters(util.FilterSpec{
- Project: request.Project,
- Domain: request.Domain,
+ Project: request.GetProject(),
+ Domain: request.GetDomain(),
}, common.LaunchPlan)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.LaunchPlanColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.LaunchPlanColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
- return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s", request.Token)
+ return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s", request.GetToken())
}
listLaunchPlansInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
@@ -535,7 +534,7 @@ func (m *LaunchPlanManager) ListLaunchPlanIds(ctx context.Context, request *admi
return nil, err
}
var token string
- if len(output.LaunchPlans) == int(request.Limit) {
+ if len(output.LaunchPlans) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(output.LaunchPlans))
}
return &admin.NamedEntityIdentifierList{
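
A couple of the launch plan manager changes also drop redundant nil checks before len() on slices (around workflowModel.TypedInterface and the formerly-active launch plan list). In Go, len of a nil slice is defined to be 0, so the nil guard adds nothing; a small illustrative sketch:

package main

import "fmt"

func main() {
	var b []byte // nil slice
	// len on a nil slice is 0, so `b != nil && len(b) > 0` is equivalent to `len(b) > 0`.
	fmt.Println(b == nil, len(b), len(b) > 0) // true 0 false

	b = []byte("payload")
	fmt.Println(b == nil, len(b), len(b) > 0) // false 7 true
}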
diff --git a/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go b/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go
index d40d7c5e1f..3d551c4bc6 100644
--- a/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go
@@ -59,7 +59,7 @@ func getMockConfigForLpTest() runtimeInterfaces.Configuration {
func setDefaultWorkflowCallbackForLpTest(repository interfaces.Repository) {
workflowSpec := testutils.GetSampleWorkflowSpecForTest()
- typedInterface, _ := proto.Marshal(workflowSpec.Template.Interface)
+ typedInterface, _ := proto.Marshal(workflowSpec.GetTemplate().GetInterface())
workflowGetFunc := func(input interfaces.Identifier) (models.Workflow, error) {
return models.Workflow{
WorkflowKey: models.WorkflowKey{
@@ -107,10 +107,10 @@ func TestLaunchPlanManager_GetLaunchPlan(t *testing.T) {
workflowRequest := testutils.GetWorkflowRequest()
closure := admin.LaunchPlanClosure{
- ExpectedInputs: lpRequest.Spec.DefaultInputs,
- ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs,
+ ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(),
+ ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(),
}
- specBytes, _ := proto.Marshal(lpRequest.Spec)
+ specBytes, _ := proto.Marshal(lpRequest.GetSpec())
closureBytes, _ := proto.Marshal(&closure)
launchPlanGetFunc := func(input interfaces.Identifier) (models.LaunchPlan, error) {
@@ -143,10 +143,10 @@ func TestLaunchPlanManager_GetActiveLaunchPlan(t *testing.T) {
workflowRequest := testutils.GetWorkflowRequest()
closure := admin.LaunchPlanClosure{
- ExpectedInputs: lpRequest.Spec.DefaultInputs,
- ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs,
+ ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(),
+ ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(),
}
- specBytes, _ := proto.Marshal(lpRequest.Spec)
+ specBytes, _ := proto.Marshal(lpRequest.GetSpec())
closureBytes, _ := proto.Marshal(&closure)
launchPlanListFunc := func(input interfaces.ListResourceInput) (interfaces.LaunchPlanCollectionOutput, error) {
@@ -169,10 +169,10 @@ func TestLaunchPlanManager_GetActiveLaunchPlan(t *testing.T) {
LaunchPlans: []models.LaunchPlan{
{
LaunchPlanKey: models.LaunchPlanKey{
- Project: lpRequest.Id.Project,
- Domain: lpRequest.Id.Domain,
- Name: lpRequest.Id.Name,
- Version: lpRequest.Id.Version,
+ Project: lpRequest.GetId().GetProject(),
+ Domain: lpRequest.GetId().GetDomain(),
+ Name: lpRequest.GetId().GetName(),
+ Version: lpRequest.GetId().GetVersion(),
},
Spec: specBytes,
Closure: closureBytes,
@@ -185,9 +185,9 @@ func TestLaunchPlanManager_GetActiveLaunchPlan(t *testing.T) {
repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetListCallback(launchPlanListFunc)
response, err := lpManager.GetActiveLaunchPlan(context.Background(), &admin.ActiveLaunchPlanRequest{
Id: &admin.NamedEntityIdentifier{
- Project: lpRequest.Id.Project,
- Domain: lpRequest.Id.Domain,
- Name: lpRequest.Id.Name,
+ Project: lpRequest.GetId().GetProject(),
+ Domain: lpRequest.GetId().GetDomain(),
+ Name: lpRequest.GetId().GetName(),
},
})
assert.NoError(t, err)
@@ -205,9 +205,9 @@ func TestLaunchPlanManager_GetActiveLaunchPlan_NoneActive(t *testing.T) {
repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetListCallback(launchPlanListFunc)
response, err := lpManager.GetActiveLaunchPlan(context.Background(), &admin.ActiveLaunchPlanRequest{
Id: &admin.NamedEntityIdentifier{
- Project: lpRequest.Id.Project,
- Domain: lpRequest.Id.Domain,
- Name: lpRequest.Id.Name,
+ Project: lpRequest.GetId().GetProject(),
+ Domain: lpRequest.GetId().GetDomain(),
+ Name: lpRequest.GetId().GetName(),
},
})
assert.EqualError(t, err, "No active launch plan could be found: project:domain:name")
@@ -298,11 +298,11 @@ func TestCreateLaunchPlanValidateCreate(t *testing.T) {
setDefaultWorkflowCallbackForLpTest(repository)
lpCreateFunc := func(input models.LaunchPlan) error {
launchPlan, _ := transformers.FromLaunchPlanModel(input)
- assert.Equal(t, project, launchPlan.Id.Project)
- assert.Equal(t, domain, launchPlan.Id.Domain)
- assert.Equal(t, name, launchPlan.Id.Name)
- assert.Equal(t, version, launchPlan.Id.Version)
- assert.True(t, proto.Equal(testutils.GetLaunchPlanRequest().Spec, launchPlan.Spec))
+ assert.Equal(t, project, launchPlan.GetId().GetProject())
+ assert.Equal(t, domain, launchPlan.GetId().GetDomain())
+ assert.Equal(t, name, launchPlan.GetId().GetName())
+ assert.Equal(t, version, launchPlan.GetId().GetVersion())
+ assert.True(t, proto.Equal(testutils.GetLaunchPlanRequest().GetSpec(), launchPlan.GetSpec()))
expectedInputs := &core.ParameterMap{
Parameters: map[string]*core.Parameter{
"foo": {
@@ -315,9 +315,9 @@ func TestCreateLaunchPlanValidateCreate(t *testing.T) {
},
},
}
- assert.True(t, proto.Equal(expectedInputs, launchPlan.Closure.ExpectedInputs))
- assert.True(t, proto.Equal(testutils.GetSampleWorkflowSpecForTest().Template.Interface.Outputs,
- launchPlan.Closure.ExpectedOutputs))
+ assert.True(t, proto.Equal(expectedInputs, launchPlan.GetClosure().GetExpectedInputs()))
+ assert.True(t, proto.Equal(testutils.GetSampleWorkflowSpecForTest().GetTemplate().GetInterface().GetOutputs(),
+ launchPlan.GetClosure().GetExpectedOutputs()))
return nil
}
repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetCreateCallback(lpCreateFunc)
@@ -350,15 +350,15 @@ func TestCreateLaunchPlanNoWorkflowInterface(t *testing.T) {
repository.WorkflowRepo().(*repositoryMocks.MockWorkflowRepo).SetGetCallback(workflowGetFunc)
lpCreateFunc := func(input models.LaunchPlan) error {
launchPlan, _ := transformers.FromLaunchPlanModel(input)
- assert.Equal(t, project, launchPlan.Id.Project)
- assert.Equal(t, domain, launchPlan.Id.Domain)
- assert.Equal(t, name, launchPlan.Id.Name)
- assert.Equal(t, version, launchPlan.Id.Version)
- expectedLaunchPlanSpec := testutils.GetLaunchPlanRequest().Spec
+ assert.Equal(t, project, launchPlan.GetId().GetProject())
+ assert.Equal(t, domain, launchPlan.GetId().GetDomain())
+ assert.Equal(t, name, launchPlan.GetId().GetName())
+ assert.Equal(t, version, launchPlan.GetId().GetVersion())
+ expectedLaunchPlanSpec := testutils.GetLaunchPlanRequest().GetSpec()
expectedLaunchPlanSpec.FixedInputs = nil
expectedLaunchPlanSpec.DefaultInputs.Parameters = map[string]*core.Parameter{}
- assert.EqualValues(t, expectedLaunchPlanSpec.String(), launchPlan.Spec.String())
- assert.Empty(t, launchPlan.Closure.ExpectedInputs)
+ assert.EqualValues(t, expectedLaunchPlanSpec.String(), launchPlan.GetSpec().String())
+ assert.Empty(t, launchPlan.GetClosure().GetExpectedInputs())
return nil
}
repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetCreateCallback(lpCreateFunc)
@@ -1058,10 +1058,10 @@ func TestLaunchPlanManager_ListLaunchPlans(t *testing.T) {
workflowRequest := testutils.GetWorkflowRequest()
closure := admin.LaunchPlanClosure{
- ExpectedInputs: lpRequest.Spec.DefaultInputs,
- ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs,
+ ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(),
+ ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(),
}
- specBytes, _ := proto.Marshal(lpRequest.Spec)
+ specBytes, _ := proto.Marshal(lpRequest.GetSpec())
closureBytes, _ := proto.Marshal(&closure)
createdAt := time.Now()
@@ -1146,14 +1146,14 @@ func TestLaunchPlanManager_ListLaunchPlans(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Equal(t, 2, len(lpList.LaunchPlans))
- for idx, lp := range lpList.LaunchPlans {
- assert.Equal(t, project, lp.Id.Project)
- assert.Equal(t, domain, lp.Id.Domain)
- assert.Equal(t, name, lp.Id.Name)
- assert.Equal(t, fmt.Sprintf("%v", idx+1), lp.Id.Version)
- assert.True(t, proto.Equal(createdAtProto, lp.Closure.CreatedAt))
- assert.True(t, proto.Equal(updatedAtProto, lp.Closure.UpdatedAt))
+ assert.Equal(t, 2, len(lpList.GetLaunchPlans()))
+ for idx, lp := range lpList.GetLaunchPlans() {
+ assert.Equal(t, project, lp.GetId().GetProject())
+ assert.Equal(t, domain, lp.GetId().GetDomain())
+ assert.Equal(t, name, lp.GetId().GetName())
+ assert.Equal(t, fmt.Sprintf("%v", idx+1), lp.GetId().GetVersion())
+ assert.True(t, proto.Equal(createdAtProto, lp.GetClosure().GetCreatedAt()))
+ assert.True(t, proto.Equal(updatedAtProto, lp.GetClosure().GetUpdatedAt()))
}
}
@@ -1165,10 +1165,10 @@ func TestLaunchPlanManager_ListLaunchPlanIds(t *testing.T) {
workflowRequest := testutils.GetWorkflowRequest()
closure := admin.LaunchPlanClosure{
- ExpectedInputs: lpRequest.Spec.DefaultInputs,
- ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs,
+ ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(),
+ ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(),
}
- specBytes, _ := proto.Marshal(lpRequest.Spec)
+ specBytes, _ := proto.Marshal(lpRequest.GetSpec())
closureBytes, _ := proto.Marshal(&closure)
launchPlanListFunc := func(input interfaces.ListResourceInput) (
@@ -1232,11 +1232,11 @@ func TestLaunchPlanManager_ListLaunchPlanIds(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Equal(t, 2, len(lpList.Entities))
- for _, id := range lpList.Entities {
- assert.Equal(t, project, id.Project)
- assert.Equal(t, domain, id.Domain)
- assert.Equal(t, name, id.Name)
+ assert.Equal(t, 2, len(lpList.GetEntities()))
+ for _, id := range lpList.GetEntities() {
+ assert.Equal(t, project, id.GetProject())
+ assert.Equal(t, domain, id.GetDomain())
+ assert.Equal(t, name, id.GetName())
}
}
@@ -1248,10 +1248,10 @@ func TestLaunchPlanManager_ListActiveLaunchPlans(t *testing.T) {
workflowRequest := testutils.GetWorkflowRequest()
closure := admin.LaunchPlanClosure{
- ExpectedInputs: lpRequest.Spec.DefaultInputs,
- ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs,
+ ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(),
+ ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(),
}
- specBytes, _ := proto.Marshal(lpRequest.Spec)
+ specBytes, _ := proto.Marshal(lpRequest.GetSpec())
closureBytes, _ := proto.Marshal(&closure)
launchPlanListFunc := func(input interfaces.ListResourceInput) (
@@ -1319,11 +1319,11 @@ func TestLaunchPlanManager_ListActiveLaunchPlans(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Equal(t, 2, len(lpList.LaunchPlans))
- for _, id := range lpList.LaunchPlans {
- assert.Equal(t, project, id.Id.Project)
- assert.Equal(t, domain, id.Id.Domain)
- assert.Equal(t, name, id.Id.Name)
+ assert.Equal(t, 2, len(lpList.GetLaunchPlans()))
+ for _, id := range lpList.GetLaunchPlans() {
+ assert.Equal(t, project, id.GetId().GetProject())
+ assert.Equal(t, domain, id.GetId().GetDomain())
+ assert.Equal(t, name, id.GetId().GetName())
}
}
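
The metrics_manager.go changes that follow apply the same getter migration to oneof fields as well, switching on node.GetTarget() and target.WorkflowNode.GetReference() instead of reading the oneof wrapper field directly. A rough sketch of that shape, with hypothetical stand-in types rather than the generated flyteidl ones:

package main

import "fmt"

// isNodeTarget mimics the unexported oneof interface protoc-gen-go generates (hypothetical names).
type isNodeTarget interface{ isNodeTarget() }

type NodeTaskNode struct{ Task string }
type NodeBranchNode struct{ Branch string }

func (*NodeTaskNode) isNodeTarget()   {}
func (*NodeBranchNode) isNodeTarget() {}

// Node holds the oneof; GetTarget is nil-safe like a generated getter.
type Node struct {
	Target isNodeTarget
}

func (n *Node) GetTarget() isNodeTarget {
	if n != nil {
		return n.Target
	}
	return nil
}

func describe(n *Node) string {
	switch t := n.GetTarget().(type) {
	case *NodeTaskNode:
		return "task node: " + t.Task
	case *NodeBranchNode:
		return "branch node: " + t.Branch
	default:
		return "target unset"
	}
}

func main() {
	fmt.Println(describe(&Node{Target: &NodeTaskNode{Task: "train"}})) // task node: train
	fmt.Println(describe(nil))                                         // target unset
}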
diff --git a/flyteadmin/pkg/manager/impl/metrics_manager.go b/flyteadmin/pkg/manager/impl/metrics_manager.go
index 231909f4e8..a689c60a70 100644
--- a/flyteadmin/pkg/manager/impl/metrics_manager.go
+++ b/flyteadmin/pkg/manager/impl/metrics_manager.go
@@ -70,18 +70,18 @@ func createOperationSpan(startTime, endTime *timestamp.Timestamp, operation stri
// getBranchNode searches the provided BranchNode definition for the Node identified by nodeID.
func getBranchNode(nodeID string, branchNode *core.BranchNode) *core.Node {
- if branchNode.IfElse.Case.ThenNode.Id == nodeID {
- return branchNode.IfElse.Case.ThenNode
+ if branchNode.GetIfElse().GetCase().GetThenNode().GetId() == nodeID {
+ return branchNode.GetIfElse().GetCase().GetThenNode()
}
- for _, other := range branchNode.IfElse.Other {
- if other.ThenNode.Id == nodeID {
- return other.ThenNode
+ for _, other := range branchNode.GetIfElse().GetOther() {
+ if other.GetThenNode().GetId() == nodeID {
+ return other.GetThenNode()
}
}
- if elseNode, ok := branchNode.IfElse.Default.(*core.IfElseBlock_ElseNode); ok {
- if elseNode.ElseNode.Id == nodeID {
+ if elseNode, ok := branchNode.GetIfElse().GetDefault().(*core.IfElseBlock_ElseNode); ok {
+ if elseNode.ElseNode.GetId() == nodeID {
return elseNode.ElseNode
}
}
@@ -98,13 +98,13 @@ func (m *MetricsManager) getLatestUpstreamNodeExecution(nodeID string, upstreamN
var nodeExecution *admin.NodeExecution
var latestUpstreamUpdatedAt = time.Unix(0, 0)
if connectionSet, exists := upstreamNodeIds[nodeID]; exists {
- for _, upstreamNodeID := range connectionSet.Ids {
+ for _, upstreamNodeID := range connectionSet.GetIds() {
upstreamNodeExecution, exists := nodeExecutions[upstreamNodeID]
if !exists {
continue
}
- t := upstreamNodeExecution.Closure.UpdatedAt.AsTime()
+ t := upstreamNodeExecution.GetClosure().GetUpdatedAt().AsTime()
if t.After(latestUpstreamUpdatedAt) {
nodeExecution = upstreamNodeExecution
latestUpstreamUpdatedAt = t
@@ -124,15 +124,15 @@ func (m *MetricsManager) getNodeExecutions(ctx context.Context, request *admin.N
return nil, err
}
- for _, nodeExecution := range response.NodeExecutions {
- nodeExecutions[nodeExecution.Metadata.SpecNodeId] = nodeExecution
+ for _, nodeExecution := range response.GetNodeExecutions() {
+ nodeExecutions[nodeExecution.GetMetadata().GetSpecNodeId()] = nodeExecution
}
- if len(response.NodeExecutions) < int(request.Limit) {
+ if len(response.GetNodeExecutions()) < int(request.GetLimit()) {
break
}
- request.Token = response.Token
+ request.Token = response.GetToken()
}
return nodeExecutions, nil
@@ -147,13 +147,13 @@ func (m *MetricsManager) getTaskExecutions(ctx context.Context, request *admin.T
return nil, err
}
- taskExecutions = append(taskExecutions, response.TaskExecutions...)
+ taskExecutions = append(taskExecutions, response.GetTaskExecutions()...)
- if len(response.TaskExecutions) < int(request.Limit) {
+ if len(response.GetTaskExecutions()) < int(request.GetLimit()) {
break
}
- request.Token = response.Token
+ request.Token = response.GetToken()
}
return taskExecutions, nil
@@ -166,9 +166,9 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context,
// retrieve node execution(s)
nodeExecutions, err := m.getNodeExecutions(ctx, &admin.NodeExecutionListRequest{
- WorkflowExecutionId: nodeExecution.Id.ExecutionId,
+ WorkflowExecutionId: nodeExecution.GetId().GetExecutionId(),
Limit: RequestLimit,
- UniqueParentId: nodeExecution.Id.NodeId,
+ UniqueParentId: nodeExecution.GetId().GetNodeId(),
})
if err != nil {
return err
@@ -176,7 +176,7 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context,
// check if the node started
if len(nodeExecutions) == 0 {
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup))
} else {
// parse branchNode
if len(nodeExecutions) != 1 {
@@ -188,14 +188,14 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context,
branchNodeExecution = e
}
- node := getBranchNode(branchNodeExecution.Metadata.SpecNodeId, branchNode)
+ node := getBranchNode(branchNodeExecution.GetMetadata().GetSpecNodeId(), branchNode)
if node == nil {
return fmt.Errorf("failed to identify branch node final node definition for nodeID '%s' and branchNode '%+v'",
- branchNodeExecution.Metadata.SpecNodeId, branchNode)
+ branchNodeExecution.GetMetadata().GetSpecNodeId(), branchNode)
}
// frontend overhead
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, branchNodeExecution.Closure.CreatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), branchNodeExecution.GetClosure().GetCreatedAt(), nodeSetup))
// node execution
nodeExecutionSpan, err := m.parseNodeExecution(ctx, branchNodeExecution, node, depth)
@@ -206,9 +206,9 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context,
*spans = append(*spans, nodeExecutionSpan)
// backend overhead
- if !nodeExecution.Closure.UpdatedAt.AsTime().Before(branchNodeExecution.Closure.UpdatedAt.AsTime()) {
- *spans = append(*spans, createOperationSpan(branchNodeExecution.Closure.UpdatedAt,
- nodeExecution.Closure.UpdatedAt, nodeTeardown))
+ if !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(branchNodeExecution.GetClosure().GetUpdatedAt().AsTime()) {
+ *spans = append(*spans, createOperationSpan(branchNodeExecution.GetClosure().GetUpdatedAt(),
+ nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown))
}
}
@@ -219,7 +219,7 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context,
// which are appended to the provided spans argument.
func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error {
taskExecutions, err := m.getTaskExecutions(ctx, &admin.TaskExecutionListRequest{
- NodeExecutionId: nodeExecution.Id,
+ NodeExecutionId: nodeExecution.GetId(),
Limit: RequestLimit,
})
if err != nil {
@@ -228,18 +228,18 @@ func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExec
// if no task executions then everything is execution overhead
if len(taskExecutions) == 0 {
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup))
} else {
// frontend overhead
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, taskExecutions[0].Closure.CreatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), taskExecutions[0].GetClosure().GetCreatedAt(), nodeSetup))
// task execution(s)
parseTaskExecutions(taskExecutions, spans, depth)
nodeExecutions, err := m.getNodeExecutions(ctx, &admin.NodeExecutionListRequest{
- WorkflowExecutionId: nodeExecution.Id.ExecutionId,
+ WorkflowExecutionId: nodeExecution.GetId().GetExecutionId(),
Limit: RequestLimit,
- UniqueParentId: nodeExecution.Id.NodeId,
+ UniqueParentId: nodeExecution.GetId().GetNodeId(),
})
if err != nil {
return err
@@ -247,31 +247,31 @@ func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExec
lastTask := taskExecutions[len(taskExecutions)-1]
if len(nodeExecutions) == 0 {
- if !nodeExecution.Closure.UpdatedAt.AsTime().Before(lastTask.Closure.UpdatedAt.AsTime()) {
- *spans = append(*spans, createOperationSpan(lastTask.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeReset))
+ if !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(lastTask.GetClosure().GetUpdatedAt().AsTime()) {
+ *spans = append(*spans, createOperationSpan(lastTask.GetClosure().GetUpdatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeReset))
}
} else {
// between task execution(s) and node execution(s) overhead
startNode := nodeExecutions[v1alpha1.StartNodeID]
- *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].Closure.UpdatedAt,
- startNode.Closure.UpdatedAt, nodeReset))
+ *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].GetClosure().GetUpdatedAt(),
+ startNode.GetClosure().GetUpdatedAt(), nodeReset))
// node execution(s)
- getDataRequest := &admin.NodeExecutionGetDataRequest{Id: nodeExecution.Id}
+ getDataRequest := &admin.NodeExecutionGetDataRequest{Id: nodeExecution.GetId()}
nodeExecutionData, err := m.nodeExecutionManager.GetNodeExecutionData(ctx, getDataRequest)
if err != nil {
return err
}
- if err := m.parseNodeExecutions(ctx, nodeExecutions, nodeExecutionData.DynamicWorkflow.CompiledWorkflow, spans, depth); err != nil {
+ if err := m.parseNodeExecutions(ctx, nodeExecutions, nodeExecutionData.GetDynamicWorkflow().GetCompiledWorkflow(), spans, depth); err != nil {
return err
}
// backend overhead
latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID,
- nodeExecutionData.DynamicWorkflow.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions)
- if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) {
- *spans = append(*spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown))
+ nodeExecutionData.GetDynamicWorkflow().GetCompiledWorkflow().GetPrimary().GetConnections().GetUpstream(), nodeExecutions)
+ if latestUpstreamNode != nil && !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(latestUpstreamNode.GetClosure().GetUpdatedAt().AsTime()) {
+ *spans = append(*spans, createOperationSpan(latestUpstreamNode.GetClosure().GetUpdatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown))
}
}
}
@@ -285,14 +285,14 @@ func (m *MetricsManager) parseExecution(ctx context.Context, execution *admin.Ex
spans := make([]*core.Span, 0)
if depth != 0 {
// retrieve workflow and node executions
- workflowRequest := &admin.ObjectGetRequest{Id: execution.Closure.WorkflowId}
+ workflowRequest := &admin.ObjectGetRequest{Id: execution.GetClosure().GetWorkflowId()}
workflow, err := m.workflowManager.GetWorkflow(ctx, workflowRequest)
if err != nil {
return nil, err
}
nodeExecutions, err := m.getNodeExecutions(ctx, &admin.NodeExecutionListRequest{
- WorkflowExecutionId: execution.Id,
+ WorkflowExecutionId: execution.GetId(),
Limit: RequestLimit,
})
if err != nil {
@@ -301,32 +301,32 @@ func (m *MetricsManager) parseExecution(ctx context.Context, execution *admin.Ex
// check if workflow has started
startNode := nodeExecutions[v1alpha1.StartNodeID]
- if startNode.Closure.UpdatedAt == nil || reflect.DeepEqual(startNode.Closure.UpdatedAt, emptyTimestamp) {
- spans = append(spans, createOperationSpan(execution.Closure.CreatedAt, execution.Closure.UpdatedAt, workflowSetup))
+ if startNode.GetClosure().GetUpdatedAt() == nil || reflect.DeepEqual(startNode.GetClosure().GetUpdatedAt(), emptyTimestamp) {
+ spans = append(spans, createOperationSpan(execution.GetClosure().GetCreatedAt(), execution.GetClosure().GetUpdatedAt(), workflowSetup))
} else {
// compute frontend overhead
- spans = append(spans, createOperationSpan(execution.Closure.CreatedAt, startNode.Closure.UpdatedAt, workflowSetup))
+ spans = append(spans, createOperationSpan(execution.GetClosure().GetCreatedAt(), startNode.GetClosure().GetUpdatedAt(), workflowSetup))
// iterate over nodes and compute overhead
- if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.Closure.CompiledWorkflow, &spans, depth-1); err != nil {
+ if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.GetClosure().GetCompiledWorkflow(), &spans, depth-1); err != nil {
return nil, err
}
// compute backend overhead
latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID,
- workflow.Closure.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions)
- if latestUpstreamNode != nil && !execution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) {
- spans = append(spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt,
- execution.Closure.UpdatedAt, workflowTeardown))
+ workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetConnections().GetUpstream(), nodeExecutions)
+ if latestUpstreamNode != nil && !execution.GetClosure().GetUpdatedAt().AsTime().Before(latestUpstreamNode.GetClosure().GetUpdatedAt().AsTime()) {
+ spans = append(spans, createOperationSpan(latestUpstreamNode.GetClosure().GetUpdatedAt(),
+ execution.GetClosure().GetUpdatedAt(), workflowTeardown))
}
}
}
return &core.Span{
- StartTime: execution.Closure.CreatedAt,
- EndTime: execution.Closure.UpdatedAt,
+ StartTime: execution.GetClosure().GetCreatedAt(),
+ EndTime: execution.GetClosure().GetUpdatedAt(),
Id: &core.Span_WorkflowId{
- WorkflowId: execution.Id,
+ WorkflowId: execution.GetId(),
},
Spans: spans,
}, nil
@@ -336,23 +336,23 @@ func (m *MetricsManager) parseExecution(ctx context.Context, execution *admin.Ex
// which are appended to the provided spans argument.
func (m *MetricsManager) parseGateNodeExecution(_ context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span) {
// check if node has started yet
- if nodeExecution.Closure.StartedAt == nil || reflect.DeepEqual(nodeExecution.Closure.StartedAt, emptyTimestamp) {
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup))
+ if nodeExecution.GetClosure().GetStartedAt() == nil || reflect.DeepEqual(nodeExecution.GetClosure().GetStartedAt(), emptyTimestamp) {
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup))
} else {
// frontend overhead
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.StartedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetStartedAt(), nodeSetup))
// check if plugin has completed yet
- if nodeExecution.Closure.Duration == nil || reflect.DeepEqual(nodeExecution.Closure.Duration, emptyDuration) {
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.StartedAt,
- nodeExecution.Closure.UpdatedAt, nodeIdle))
+ if nodeExecution.GetClosure().GetDuration() == nil || reflect.DeepEqual(nodeExecution.GetClosure().GetDuration(), emptyDuration) {
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetStartedAt(),
+ nodeExecution.GetClosure().GetUpdatedAt(), nodeIdle))
} else {
// idle time
- nodeEndTime := timestamppb.New(nodeExecution.Closure.StartedAt.AsTime().Add(nodeExecution.Closure.Duration.AsDuration()))
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.StartedAt, nodeEndTime, nodeIdle))
+ nodeEndTime := timestamppb.New(nodeExecution.GetClosure().GetStartedAt().AsTime().Add(nodeExecution.GetClosure().GetDuration().AsDuration()))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetStartedAt(), nodeEndTime, nodeIdle))
// backend overhead
- *spans = append(*spans, createOperationSpan(nodeEndTime, nodeExecution.Closure.UpdatedAt, nodeTeardown))
+ *spans = append(*spans, createOperationSpan(nodeEndTime, nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown))
}
}
}
@@ -361,19 +361,19 @@ func (m *MetricsManager) parseGateNodeExecution(_ context.Context, nodeExecution
// Spans which are appended to the provided spans argument.
func (m *MetricsManager) parseLaunchPlanNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error {
// check if workflow started yet
- workflowNode := nodeExecution.Closure.GetWorkflowNodeMetadata()
+ workflowNode := nodeExecution.GetClosure().GetWorkflowNodeMetadata()
if workflowNode == nil {
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup))
} else {
// retrieve execution
- executionRequest := &admin.WorkflowExecutionGetRequest{Id: workflowNode.ExecutionId}
+ executionRequest := &admin.WorkflowExecutionGetRequest{Id: workflowNode.GetExecutionId()}
execution, err := m.executionManager.GetExecution(ctx, executionRequest)
if err != nil {
return err
}
// frontend overhead
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, execution.Closure.CreatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), execution.GetClosure().GetCreatedAt(), nodeSetup))
// execution
span, err := m.parseExecution(ctx, execution, depth)
@@ -384,8 +384,8 @@ func (m *MetricsManager) parseLaunchPlanNodeExecution(ctx context.Context, nodeE
*spans = append(*spans, span)
// backend overhead
- if !nodeExecution.Closure.UpdatedAt.AsTime().Before(execution.Closure.UpdatedAt.AsTime()) {
- *spans = append(*spans, createOperationSpan(execution.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown))
+ if !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(execution.GetClosure().GetUpdatedAt().AsTime()) {
+ *spans = append(*spans, createOperationSpan(execution.GetClosure().GetUpdatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown))
}
}
@@ -400,7 +400,7 @@ func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution *
// parse node
var err error
- switch target := node.Target.(type) {
+ switch target := node.GetTarget().(type) {
case *core.Node_BranchNode:
// handle branch node
err = m.parseBranchNodeExecution(ctx, nodeExecution, target.BranchNode, &spans, depth-1)
@@ -408,7 +408,7 @@ func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution *
// handle gate node
m.parseGateNodeExecution(ctx, nodeExecution, &spans)
case *core.Node_TaskNode:
- if nodeExecution.Metadata.IsParentNode {
+ if nodeExecution.GetMetadata().GetIsParentNode() {
// handle dynamic node
err = m.parseDynamicNodeExecution(ctx, nodeExecution, &spans, depth-1)
} else {
@@ -416,7 +416,7 @@ func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution *
err = m.parseTaskNodeExecution(ctx, nodeExecution, &spans, depth-1)
}
case *core.Node_WorkflowNode:
- switch workflow := target.WorkflowNode.Reference.(type) {
+ switch workflow := target.WorkflowNode.GetReference().(type) {
case *core.WorkflowNode_LaunchplanRef:
// handle launch plan
err = m.parseLaunchPlanNodeExecution(ctx, nodeExecution, &spans, depth-1)
@@ -436,10 +436,10 @@ func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution *
}
return &core.Span{
- StartTime: nodeExecution.Closure.CreatedAt,
- EndTime: nodeExecution.Closure.UpdatedAt,
+ StartTime: nodeExecution.GetClosure().GetCreatedAt(),
+ EndTime: nodeExecution.GetClosure().GetUpdatedAt(),
Id: &core.Span_NodeId{
- NodeId: nodeExecution.Id,
+ NodeId: nodeExecution.GetId(),
},
Spans: spans,
}, nil
@@ -456,29 +456,29 @@ func (m *MetricsManager) parseNodeExecutions(ctx context.Context, nodeExecutions
sortedNodeExecutions = append(sortedNodeExecutions, nodeExecution)
}
sort.Slice(sortedNodeExecutions, func(i, j int) bool {
- x := sortedNodeExecutions[i].Closure.CreatedAt.AsTime()
- y := sortedNodeExecutions[j].Closure.CreatedAt.AsTime()
+ x := sortedNodeExecutions[i].GetClosure().GetCreatedAt().AsTime()
+ y := sortedNodeExecutions[j].GetClosure().GetCreatedAt().AsTime()
return x.Before(y)
})
// iterate over sorted node executions
for _, nodeExecution := range sortedNodeExecutions {
- specNodeID := nodeExecution.Metadata.SpecNodeId
+ specNodeID := nodeExecution.GetMetadata().GetSpecNodeId()
if specNodeID == v1alpha1.StartNodeID || specNodeID == v1alpha1.EndNodeID {
continue
}
// get node definition from workflow
var node *core.Node
- for _, n := range compiledWorkflowClosure.Primary.Template.Nodes {
- if n.Id == specNodeID {
+ for _, n := range compiledWorkflowClosure.GetPrimary().GetTemplate().GetNodes() {
+ if n.GetId() == specNodeID {
node = n
}
}
if node == nil {
return fmt.Errorf("failed to discover workflow node '%s' in workflow '%+v'",
- specNodeID, compiledWorkflowClosure.Primary.Template.Id)
+ specNodeID, compiledWorkflowClosure.GetPrimary().GetTemplate().GetId())
}
// parse node execution
@@ -489,10 +489,10 @@ func (m *MetricsManager) parseNodeExecutions(ctx context.Context, nodeExecutions
// prepend nodeExecution spans with node transition time
latestUpstreamNode := m.getLatestUpstreamNodeExecution(specNodeID,
- compiledWorkflowClosure.Primary.Connections.Upstream, nodeExecutions)
+ compiledWorkflowClosure.GetPrimary().GetConnections().GetUpstream(), nodeExecutions)
if latestUpstreamNode != nil {
- nodeExecutionSpan.Spans = append([]*core.Span{createOperationSpan(latestUpstreamNode.Closure.UpdatedAt,
- nodeExecution.Closure.CreatedAt, nodeTransition)}, nodeExecutionSpan.Spans...)
+ nodeExecutionSpan.Spans = append([]*core.Span{createOperationSpan(latestUpstreamNode.GetClosure().GetUpdatedAt(),
+ nodeExecution.GetClosure().GetCreatedAt(), nodeTransition)}, nodeExecutionSpan.GetSpans()...)
}
*spans = append(*spans, nodeExecutionSpan)
@@ -508,9 +508,9 @@ func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context,
// retrieve node execution(s)
nodeExecutions, err := m.getNodeExecutions(ctx, &admin.NodeExecutionListRequest{
- WorkflowExecutionId: nodeExecution.Id.ExecutionId,
+ WorkflowExecutionId: nodeExecution.GetId().GetExecutionId(),
Limit: RequestLimit,
- UniqueParentId: nodeExecution.Id.NodeId,
+ UniqueParentId: nodeExecution.GetId().GetNodeId(),
})
if err != nil {
return err
@@ -518,11 +518,11 @@ func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context,
// check if the subworkflow started
if len(nodeExecutions) == 0 {
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup))
} else {
// frontend overhead
startNode := nodeExecutions[v1alpha1.StartNodeID]
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, startNode.Closure.UpdatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), startNode.GetClosure().GetUpdatedAt(), nodeSetup))
// retrieve workflow
workflowRequest := &admin.ObjectGetRequest{Id: identifier}
@@ -532,15 +532,15 @@ func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context,
}
// node execution(s)
- if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.Closure.CompiledWorkflow, spans, depth); err != nil {
+ if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.GetClosure().GetCompiledWorkflow(), spans, depth); err != nil {
return err
}
// backend overhead
latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID,
- workflow.Closure.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions)
- if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) {
- *spans = append(*spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown))
+ workflow.GetClosure().GetCompiledWorkflow().GetPrimary().GetConnections().GetUpstream(), nodeExecutions)
+ if latestUpstreamNode != nil && !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(latestUpstreamNode.GetClosure().GetUpdatedAt().AsTime()) {
+ *spans = append(*spans, createOperationSpan(latestUpstreamNode.GetClosure().GetUpdatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown))
}
}
@@ -553,32 +553,32 @@ func parseTaskExecution(taskExecution *admin.TaskExecution) *core.Span {
spans := make([]*core.Span, 0)
// check if plugin has started yet
- if taskExecution.Closure.StartedAt == nil || reflect.DeepEqual(taskExecution.Closure.StartedAt, emptyTimestamp) {
- spans = append(spans, createOperationSpan(taskExecution.Closure.CreatedAt, taskExecution.Closure.UpdatedAt, taskSetup))
+ if taskExecution.GetClosure().GetStartedAt() == nil || reflect.DeepEqual(taskExecution.GetClosure().GetStartedAt(), emptyTimestamp) {
+ spans = append(spans, createOperationSpan(taskExecution.GetClosure().GetCreatedAt(), taskExecution.GetClosure().GetUpdatedAt(), taskSetup))
} else {
// frontend overhead
- spans = append(spans, createOperationSpan(taskExecution.Closure.CreatedAt, taskExecution.Closure.StartedAt, taskSetup))
+ spans = append(spans, createOperationSpan(taskExecution.GetClosure().GetCreatedAt(), taskExecution.GetClosure().GetStartedAt(), taskSetup))
// check if plugin has completed yet
- if taskExecution.Closure.Duration == nil || reflect.DeepEqual(taskExecution.Closure.Duration, emptyDuration) {
- spans = append(spans, createOperationSpan(taskExecution.Closure.StartedAt, taskExecution.Closure.UpdatedAt, taskRuntime))
+ if taskExecution.GetClosure().GetDuration() == nil || reflect.DeepEqual(taskExecution.GetClosure().GetDuration(), emptyDuration) {
+ spans = append(spans, createOperationSpan(taskExecution.GetClosure().GetStartedAt(), taskExecution.GetClosure().GetUpdatedAt(), taskRuntime))
} else {
// plugin execution
- taskEndTime := timestamppb.New(taskExecution.Closure.StartedAt.AsTime().Add(taskExecution.Closure.Duration.AsDuration()))
- spans = append(spans, createOperationSpan(taskExecution.Closure.StartedAt, taskEndTime, taskRuntime))
+ taskEndTime := timestamppb.New(taskExecution.GetClosure().GetStartedAt().AsTime().Add(taskExecution.GetClosure().GetDuration().AsDuration()))
+ spans = append(spans, createOperationSpan(taskExecution.GetClosure().GetStartedAt(), taskEndTime, taskRuntime))
// backend overhead
- if !taskExecution.Closure.UpdatedAt.AsTime().Before(taskEndTime.AsTime()) {
- spans = append(spans, createOperationSpan(taskEndTime, taskExecution.Closure.UpdatedAt, taskTeardown))
+ if !taskExecution.GetClosure().GetUpdatedAt().AsTime().Before(taskEndTime.AsTime()) {
+ spans = append(spans, createOperationSpan(taskEndTime, taskExecution.GetClosure().GetUpdatedAt(), taskTeardown))
}
}
}
return &core.Span{
- StartTime: taskExecution.Closure.CreatedAt,
- EndTime: taskExecution.Closure.UpdatedAt,
+ StartTime: taskExecution.GetClosure().GetCreatedAt(),
+ EndTime: taskExecution.GetClosure().GetUpdatedAt(),
Id: &core.Span_TaskId{
- TaskId: taskExecution.Id,
+ TaskId: taskExecution.GetId(),
},
Spans: spans,
}
@@ -589,15 +589,15 @@ func parseTaskExecution(taskExecution *admin.TaskExecution) *core.Span {
func parseTaskExecutions(taskExecutions []*admin.TaskExecution, spans *[]*core.Span, depth int) {
// sort task executions
sort.Slice(taskExecutions, func(i, j int) bool {
- x := taskExecutions[i].Closure.CreatedAt.AsTime()
- y := taskExecutions[j].Closure.CreatedAt.AsTime()
+ x := taskExecutions[i].GetClosure().GetCreatedAt().AsTime()
+ y := taskExecutions[j].GetClosure().GetCreatedAt().AsTime()
return x.Before(y)
})
// iterate over task executions
for index, taskExecution := range taskExecutions {
if index > 0 {
- *spans = append(*spans, createOperationSpan(taskExecutions[index-1].Closure.UpdatedAt, taskExecution.Closure.CreatedAt, nodeReset))
+ *spans = append(*spans, createOperationSpan(taskExecutions[index-1].GetClosure().GetUpdatedAt(), taskExecution.GetClosure().GetCreatedAt(), nodeReset))
}
if depth != 0 {
@@ -611,7 +611,7 @@ func parseTaskExecutions(taskExecutions []*admin.TaskExecution, spans *[]*core.S
func (m *MetricsManager) parseTaskNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error {
// retrieve task executions
taskExecutions, err := m.getTaskExecutions(ctx, &admin.TaskExecutionListRequest{
- NodeExecutionId: nodeExecution.Id,
+ NodeExecutionId: nodeExecution.GetId(),
Limit: RequestLimit,
})
if err != nil {
@@ -620,19 +620,19 @@ func (m *MetricsManager) parseTaskNodeExecution(ctx context.Context, nodeExecuti
// if no task executions then everything is execution overhead
if len(taskExecutions) == 0 {
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), nodeExecution.GetClosure().GetUpdatedAt(), nodeSetup))
} else {
// frontend overhead
- *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, taskExecutions[0].Closure.CreatedAt, nodeSetup))
+ *spans = append(*spans, createOperationSpan(nodeExecution.GetClosure().GetCreatedAt(), taskExecutions[0].GetClosure().GetCreatedAt(), nodeSetup))
// parse task executions
parseTaskExecutions(taskExecutions, spans, depth)
// backend overhead
lastTask := taskExecutions[len(taskExecutions)-1]
- if !nodeExecution.Closure.UpdatedAt.AsTime().Before(lastTask.Closure.UpdatedAt.AsTime()) {
- *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].Closure.UpdatedAt,
- nodeExecution.Closure.UpdatedAt, nodeTeardown))
+ if !nodeExecution.GetClosure().GetUpdatedAt().AsTime().Before(lastTask.GetClosure().GetUpdatedAt().AsTime()) {
+ *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].GetClosure().GetUpdatedAt(),
+ nodeExecution.GetClosure().GetUpdatedAt(), nodeTeardown))
}
}
@@ -645,13 +645,13 @@ func (m *MetricsManager) GetExecutionMetrics(ctx context.Context,
request *admin.WorkflowExecutionGetMetricsRequest) (*admin.WorkflowExecutionGetMetricsResponse, error) {
// retrieve workflow execution
- executionRequest := &admin.WorkflowExecutionGetRequest{Id: request.Id}
+ executionRequest := &admin.WorkflowExecutionGetRequest{Id: request.GetId()}
execution, err := m.executionManager.GetExecution(ctx, executionRequest)
if err != nil {
return nil, err
}
- span, err := m.parseExecution(ctx, execution, int(request.Depth))
+ span, err := m.parseExecution(ctx, execution, int(request.GetDepth()))
if err != nil {
return nil, err
}
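
All of the metrics_manager.go changes above are the same mechanical substitution: direct proto field reads become the generated getters, which protoc-gen-go emits with a nil-receiver guard, so chains like nodeExecution.GetClosure().GetUpdatedAt() yield zero values instead of panicking when an intermediate message is unset. A minimal sketch of that generated shape, using hypothetical Execution/Closure types rather than the real flyteidl messages:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"
)

// Hypothetical stand-ins for the generated flyteidl messages.
type Closure struct{ UpdatedAt *timestamppb.Timestamp }
type Execution struct{ Closure *Closure }

// protoc-gen-go emits getters of exactly this shape: safe on a nil receiver.
func (c *Closure) GetUpdatedAt() *timestamppb.Timestamp {
	if c != nil {
		return c.UpdatedAt
	}
	return nil
}

func (e *Execution) GetClosure() *Closure {
	if e != nil {
		return e.Closure
	}
	return nil
}

func main() {
	var e *Execution // e.g. an absent optional field
	// e.Closure.UpdatedAt would panic with a nil pointer dereference;
	// the getter chain simply yields nil.
	fmt.Println(e.GetClosure().GetUpdatedAt()) // <nil>
}
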
diff --git a/flyteadmin/pkg/manager/impl/metrics_manager_test.go b/flyteadmin/pkg/manager/impl/metrics_manager_test.go
index e9392be8d9..b99e0d3243 100644
--- a/flyteadmin/pkg/manager/impl/metrics_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/metrics_manager_test.go
@@ -28,8 +28,8 @@ var (
func addTimestamp(ts *timestamp.Timestamp, seconds int64) *timestamp.Timestamp {
return &timestamp.Timestamp{
- Seconds: ts.Seconds + seconds,
- Nanos: ts.Nanos,
+ Seconds: ts.GetSeconds() + seconds,
+ Nanos: ts.GetNanos(),
}
}
@@ -89,10 +89,10 @@ func parseSpans(spans []*core.Span) (map[string][]int64, int) {
operationDurations := make(map[string][]int64)
referenceCount := 0
for _, span := range spans {
- switch id := span.Id.(type) {
+ switch id := span.GetId().(type) {
case *core.Span_OperationId:
operationID := id.OperationId
- duration := span.EndTime.Seconds - span.StartTime.Seconds
+ duration := span.GetEndTime().GetSeconds() - span.GetStartTime().GetSeconds()
if array, exists := operationDurations[operationID]; exists {
operationDurations[operationID] = append(array, duration)
} else {
@@ -907,11 +907,11 @@ func TestParseTaskExecution(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
// parse task execution
span := parseTaskExecution(test.taskExecution)
- _, ok := span.Id.(*core.Span_TaskId)
+ _, ok := span.GetId().(*core.Span_TaskId)
assert.True(t, ok)
// validate spans
- operationDurations, referenceCount := parseSpans(span.Spans)
+ operationDurations, referenceCount := parseSpans(span.GetSpans())
assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations))
assert.Equal(t, 0, referenceCount)
})
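
parseTaskExecution above reconstructs the plugin end time by adding the reported Duration to StartedAt, and the test helper addTimestamp shifts a timestamp by whole seconds. A small self-contained sketch of that arithmetic with the well-known timestamppb/durationpb types; the concrete values are made up:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Assumed sample values; in the manager they come from the task execution closure.
	startedAt := timestamppb.New(time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC))
	duration := durationpb.New(90 * time.Second)

	// Mirrors: taskEndTime := timestamppb.New(startedAt.AsTime().Add(duration.AsDuration()))
	taskEndTime := timestamppb.New(startedAt.AsTime().Add(duration.AsDuration()))
	fmt.Println(taskEndTime.AsTime()) // 2024-01-01 12:01:30 +0000 UTC

	// Equivalent of the test helper addTimestamp: shift by whole seconds.
	shifted := &timestamppb.Timestamp{
		Seconds: startedAt.GetSeconds() + 30,
		Nanos:   startedAt.GetNanos(),
	}
	fmt.Println(shifted.AsTime()) // 2024-01-01 12:00:30 +0000 UTC
}
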
diff --git a/flyteadmin/pkg/manager/impl/named_entity_manager.go b/flyteadmin/pkg/manager/impl/named_entity_manager.go
index 883948318a..a8ab24261e 100644
--- a/flyteadmin/pkg/manager/impl/named_entity_manager.go
+++ b/flyteadmin/pkg/manager/impl/named_entity_manager.go
@@ -41,10 +41,10 @@ func (m *NamedEntityManager) UpdateNamedEntity(ctx context.Context, request *adm
logger.Debugf(ctx, "invalid request [%+v]: %v", request, err)
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain())
// Ensure entity exists before trying to update it
- _, err := util.GetNamedEntity(ctx, m.db, request.ResourceType, request.Id)
+ _, err := util.GetNamedEntity(ctx, m.db, request.GetResourceType(), request.GetId())
if err != nil {
return nil, err
}
@@ -52,7 +52,7 @@ func (m *NamedEntityManager) UpdateNamedEntity(ctx context.Context, request *adm
metadataModel := transformers.CreateNamedEntityModel(request)
err = m.db.NamedEntityRepo().Update(ctx, metadataModel)
if err != nil {
- logger.Debugf(ctx, "Failed to update named_entity for [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to update named_entity for [%+v] with err %v", request.GetId(), err)
return nil, err
}
return &admin.NamedEntityUpdateResponse{}, nil
@@ -64,8 +64,8 @@ func (m *NamedEntityManager) GetNamedEntity(ctx context.Context, request *admin.
logger.Debugf(ctx, "invalid request [%+v]: %v", request, err)
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
- return util.GetNamedEntity(ctx, m.db, request.ResourceType, request.Id)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain())
+ return util.GetNamedEntity(ctx, m.db, request.GetResourceType(), request.GetId())
}
func (m *NamedEntityManager) getQueryFilters(requestFilters string) ([]common.InlineFilter, error) {
@@ -98,51 +98,51 @@ func (m *NamedEntityManager) ListNamedEntities(ctx context.Context, request *adm
logger.Debugf(ctx, "invalid request [%+v]: %v", request, err)
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain())
- if len(request.Filters) == 0 {
+ if len(request.GetFilters()) == 0 {
// Add implicit filter to exclude system generated workflows
request.Filters = fmt.Sprintf("not_like(name,%s)", ".flytegen%")
}
// HACK: In order to filter by state (if requested) - we need to amend the filter to use COALESCE
// e.g. eq(state, 1) becomes 'WHERE (COALESCE(state, 0) = '1')' since not every NamedEntity necessarily
// has an entry, and therefore the default state value '0' (active), should be assumed.
- filters, err := m.getQueryFilters(request.Filters)
+ filters, err := m.getQueryFilters(request.GetFilters())
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.NamedEntityColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.NamedEntityColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListNamedEntities", request.Token)
+ "invalid pagination token %s for ListNamedEntities", request.GetToken())
}
listInput := repoInterfaces.ListNamedEntityInput{
ListResourceInput: repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
},
- Project: request.Project,
- Domain: request.Domain,
- ResourceType: request.ResourceType,
+ Project: request.GetProject(),
+ Domain: request.GetDomain(),
+ ResourceType: request.GetResourceType(),
}
output, err := m.db.NamedEntityRepo().List(ctx, listInput)
if err != nil {
logger.Debugf(ctx, "Failed to list named entities of type: %s with project: %s, domain: %s. Returned error was: %v",
- request.ResourceType, request.Project, request.Domain, err)
+ request.GetResourceType(), request.GetProject(), request.GetDomain(), err)
return nil, err
}
var token string
- if len(output.Entities) == int(request.Limit) {
+ if len(output.Entities) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(output.Entities))
}
entities := transformers.FromNamedEntityModels(output.Entities)
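
ListNamedEntities above derives its paging state entirely from the request: the token parses to an integer offset, and a follow-up token of offset + len(entities) is returned only when a full page came back. A standalone sketch of that scheme, assuming ValidateToken does little more than parse the token as an integer offset (consistent with how the next token is constructed above):

package main

import (
	"fmt"
	"strconv"
)

// nextToken mirrors the offset-based pagination above: the token is just the
// string form of the next offset, and it is only returned when a full page
// came back (otherwise there is nothing more to fetch).
func nextToken(token string, limit, returned int) (string, error) {
	offset := 0
	if token != "" {
		var err error
		offset, err = strconv.Atoi(token) // assumed behaviour of ValidateToken
		if err != nil {
			return "", fmt.Errorf("invalid pagination token %q", token)
		}
	}
	if returned == limit {
		return strconv.Itoa(offset + returned), nil
	}
	return "", nil
}

func main() {
	t, _ := nextToken("", 20, 20) // first full page of 20
	fmt.Println(t)                // "20"
	t, _ = nextToken(t, 20, 7)    // short page: no further token
	fmt.Printf("%q\n", t)         // ""
}
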
diff --git a/flyteadmin/pkg/manager/impl/node_execution_manager.go b/flyteadmin/pkg/manager/impl/node_execution_manager.go
index 2f0f60977c..82e51bec9b 100644
--- a/flyteadmin/pkg/manager/impl/node_execution_manager.go
+++ b/flyteadmin/pkg/manager/impl/node_execution_manager.go
@@ -72,30 +72,30 @@ var isParent = common.NewMapFilter(map[string]interface{}{
})
func getNodeExecutionContext(ctx context.Context, identifier *core.NodeExecutionIdentifier) context.Context {
- ctx = contextutils.WithProjectDomain(ctx, identifier.ExecutionId.Project, identifier.ExecutionId.Domain)
- ctx = contextutils.WithExecutionID(ctx, identifier.ExecutionId.Name)
- return contextutils.WithNodeID(ctx, identifier.NodeId)
+ ctx = contextutils.WithProjectDomain(ctx, identifier.GetExecutionId().GetProject(), identifier.GetExecutionId().GetDomain())
+ ctx = contextutils.WithExecutionID(ctx, identifier.GetExecutionId().GetName())
+ return contextutils.WithNodeID(ctx, identifier.GetNodeId())
}
func (m *NodeExecutionManager) createNodeExecutionWithEvent(
ctx context.Context, request *admin.NodeExecutionEventRequest, dynamicWorkflowRemoteClosureReference string) error {
var parentTaskExecutionID *uint
- if request.Event.ParentTaskMetadata != nil {
- taskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.Event.ParentTaskMetadata.Id)
+ if request.GetEvent().GetParentTaskMetadata() != nil {
+ taskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.GetEvent().GetParentTaskMetadata().GetId())
if err != nil {
return err
}
parentTaskExecutionID = &taskExecutionModel.ID
}
var parentID *uint
- if request.Event.ParentNodeMetadata != nil {
+ if request.GetEvent().GetParentNodeMetadata() != nil {
parentNodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, &core.NodeExecutionIdentifier{
- ExecutionId: request.Event.Id.ExecutionId,
- NodeId: request.Event.ParentNodeMetadata.NodeId,
+ ExecutionId: request.GetEvent().GetId().GetExecutionId(),
+ NodeId: request.GetEvent().GetParentNodeMetadata().GetNodeId(),
})
if err != nil {
logger.Errorf(ctx, "failed to fetch node execution for the parent node: %v %s with err",
- request.Event.Id.ExecutionId, request.Event.ParentNodeMetadata.NodeId, err)
+ request.GetEvent().GetId().GetExecutionId(), request.GetEvent().GetParentNodeMetadata().GetNodeId(), err)
return err
}
parentID = &parentNodeExecutionModel.ID
@@ -110,12 +110,12 @@ func (m *NodeExecutionManager) createNodeExecutionWithEvent(
})
if err != nil {
logger.Debugf(ctx, "failed to create node execution model for event request: %s with err: %v",
- request.RequestId, err)
+ request.GetRequestId(), err)
return err
}
if err := m.db.NodeExecutionRepo().Create(ctx, nodeExecutionModel); err != nil {
logger.Debugf(ctx, "Failed to create node execution with id [%+v] and model [%+v] "+
- "with err %v", request.Event.Id, nodeExecutionModel, err)
+ "with err %v", request.GetEvent().GetId(), nodeExecutionModel, err)
return err
}
m.metrics.ClosureSizeBytes.Observe(float64(len(nodeExecutionModel.Closure)))
@@ -127,21 +127,21 @@ func (m *NodeExecutionManager) updateNodeExecutionWithEvent(
dynamicWorkflowRemoteClosureReference string) (updateNodeExecutionStatus, error) {
// If we have an existing execution, check if the phase change is valid
nodeExecPhase := core.NodeExecution_Phase(core.NodeExecution_Phase_value[nodeExecutionModel.Phase])
- if nodeExecPhase == request.Event.Phase {
- logger.Debugf(ctx, "This phase was already recorded %v for %+v", nodeExecPhase.String(), request.Event.Id)
+ if nodeExecPhase == request.GetEvent().GetPhase() {
+ logger.Debugf(ctx, "This phase was already recorded %v for %+v", nodeExecPhase.String(), request.GetEvent().GetId())
return updateFailed, errors.NewFlyteAdminErrorf(codes.AlreadyExists,
- "This phase was already recorded %v for %+v", nodeExecPhase.String(), request.Event.Id)
+ "This phase was already recorded %v for %+v", nodeExecPhase.String(), request.GetEvent().GetId())
} else if common.IsNodeExecutionTerminal(nodeExecPhase) {
// Cannot go from a terminal state to anything else
logger.Warnf(ctx, "Invalid phase change from %v to %v for node execution %v",
- nodeExecPhase.String(), request.Event.Phase.String(), request.Event.Id)
+ nodeExecPhase.String(), request.GetEvent().GetPhase().String(), request.GetEvent().GetId())
return alreadyInTerminalStatus, nil
}
// if this node execution kicked off a workflow, validate that the execution exists
var childExecutionID *core.WorkflowExecutionIdentifier
- if request.Event.GetWorkflowNodeMetadata() != nil {
- childExecutionID = request.Event.GetWorkflowNodeMetadata().ExecutionId
+ if request.GetEvent().GetWorkflowNodeMetadata() != nil {
+ childExecutionID = request.GetEvent().GetWorkflowNodeMetadata().GetExecutionId()
err := validation.ValidateWorkflowExecutionIdentifier(childExecutionID)
if err != nil {
logger.Errorf(ctx, "Invalid execution ID: %s with err: %v",
@@ -158,13 +158,13 @@ func (m *NodeExecutionManager) updateNodeExecutionWithEvent(
dynamicWorkflowRemoteClosureReference, m.config.ApplicationConfiguration().GetRemoteDataConfig().InlineEventDataPolicy,
m.storageClient)
if err != nil {
- logger.Debugf(ctx, "failed to update node execution model: %+v with err: %v", request.Event.Id, err)
+ logger.Debugf(ctx, "failed to update node execution model: %+v with err: %v", request.GetEvent().GetId(), err)
return updateFailed, err
}
err = m.db.NodeExecutionRepo().Update(ctx, nodeExecutionModel)
if err != nil {
logger.Debugf(ctx, "Failed to update node execution with id [%+v] with err %v",
- request.Event.Id, err)
+ request.GetEvent().GetId(), err)
return updateFailed, err
}
@@ -172,17 +172,17 @@ func (m *NodeExecutionManager) updateNodeExecutionWithEvent(
}
func formatDynamicWorkflowID(identifier *core.Identifier) string {
- return fmt.Sprintf("%s_%s_%s_%s", identifier.Project, identifier.Domain, identifier.Name, identifier.Version)
+ return fmt.Sprintf("%s_%s_%s_%s", identifier.GetProject(), identifier.GetDomain(), identifier.GetName(), identifier.GetVersion())
}
func (m *NodeExecutionManager) uploadDynamicWorkflowClosure(
ctx context.Context, nodeID *core.NodeExecutionIdentifier, workflowID *core.Identifier,
compiledWorkflowClosure *core.CompiledWorkflowClosure) (storage.DataReference, error) {
nestedSubKeys := []string{
- nodeID.ExecutionId.Project,
- nodeID.ExecutionId.Domain,
- nodeID.ExecutionId.Name,
- nodeID.NodeId,
+ nodeID.GetExecutionId().GetProject(),
+ nodeID.GetExecutionId().GetDomain(),
+ nodeID.GetExecutionId().GetName(),
+ nodeID.GetNodeId(),
formatDynamicWorkflowID(workflowID),
}
nestedKeys := append(m.storagePrefix, nestedSubKeys...)
@@ -204,17 +204,17 @@ func (m *NodeExecutionManager) uploadDynamicWorkflowClosure(
func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *admin.NodeExecutionEventRequest) (
*admin.NodeExecutionEventResponse, error) {
if err := validation.ValidateNodeExecutionEventRequest(request, m.config.ApplicationConfiguration().GetRemoteDataConfig().MaxSizeInBytes); err != nil {
- logger.Debugf(ctx, "CreateNodeEvent called with invalid identifier [%+v]: %v", request.Event.Id, err)
+ logger.Debugf(ctx, "CreateNodeEvent called with invalid identifier [%+v]: %v", request.GetEvent().GetId(), err)
}
- ctx = getNodeExecutionContext(ctx, request.Event.Id)
+ ctx = getNodeExecutionContext(ctx, request.GetEvent().GetId())
logger.Debugf(ctx, "Received node execution event for Node Exec Id [%+v] transitioning to phase [%v], w/ Metadata [%v]",
- request.Event.Id, request.Event.Phase, request.Event.ParentTaskMetadata)
+ request.GetEvent().GetId(), request.GetEvent().GetPhase(), request.GetEvent().GetParentTaskMetadata())
- executionID := request.Event.Id.ExecutionId
+ executionID := request.GetEvent().GetId().GetExecutionId()
workflowExecution, err := m.db.ExecutionRepo().Get(ctx, repoInterfaces.Identifier{
- Project: executionID.Project,
- Domain: executionID.Domain,
- Name: executionID.Name,
+ Project: executionID.GetProject(),
+ Domain: executionID.GetDomain(),
+ Name: executionID.GetName(),
})
if err != nil {
m.metrics.MissingWorkflowExecution.Inc()
@@ -228,15 +228,15 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *adm
return nil, fmt.Errorf("failed to get existing execution id: [%+v]", executionID)
}
- if err := validation.ValidateCluster(ctx, workflowExecution.Cluster, request.Event.ProducerId); err != nil {
+ if err := validation.ValidateCluster(ctx, workflowExecution.Cluster, request.GetEvent().GetProducerId()); err != nil {
return nil, err
}
var dynamicWorkflowRemoteClosureReference string
- if request.Event.GetTaskNodeMetadata() != nil && request.Event.GetTaskNodeMetadata().DynamicWorkflow != nil {
+ if request.GetEvent().GetTaskNodeMetadata() != nil && request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow() != nil {
dynamicWorkflowRemoteClosureDataReference, err := m.uploadDynamicWorkflowClosure(
- ctx, request.Event.Id, request.Event.GetTaskNodeMetadata().DynamicWorkflow.Id,
- request.Event.GetTaskNodeMetadata().DynamicWorkflow.CompiledWorkflow)
+ ctx, request.GetEvent().GetId(), request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow().GetId(),
+ request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow().GetCompiledWorkflow())
if err != nil {
return nil, err
}
@@ -244,12 +244,12 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *adm
}
nodeExecutionModel, err := m.db.NodeExecutionRepo().Get(ctx, repoInterfaces.NodeExecutionResource{
- NodeExecutionIdentifier: request.Event.Id,
+ NodeExecutionIdentifier: request.GetEvent().GetId(),
})
if err != nil {
if err.(errors.FlyteAdminError).Code() != codes.NotFound {
logger.Debugf(ctx, "Failed to retrieve existing node execution with id [%+v] with err: %v",
- request.Event.Id, err)
+ request.GetEvent().GetId(), err)
return nil, err
}
err = m.createNodeExecutionWithEvent(ctx, request, dynamicWorkflowRemoteClosureReference)
@@ -265,33 +265,33 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *adm
}
if updateStatus == alreadyInTerminalStatus {
- curPhase := request.Event.Phase.String()
+ curPhase := request.GetEvent().GetPhase().String()
errorMsg := fmt.Sprintf("Invalid phase change from %s to %s for node execution %v", phase.String(), curPhase, nodeExecutionModel.ID)
return nil, errors.NewAlreadyInTerminalStateError(ctx, errorMsg, curPhase)
}
}
m.dbEventWriter.Write(request)
- if request.Event.Phase == core.NodeExecution_RUNNING {
+ if request.GetEvent().GetPhase() == core.NodeExecution_RUNNING {
m.metrics.ActiveNodeExecutions.Inc()
- } else if common.IsNodeExecutionTerminal(request.Event.Phase) {
+ } else if common.IsNodeExecutionTerminal(request.GetEvent().GetPhase()) {
m.metrics.ActiveNodeExecutions.Dec()
- m.metrics.NodeExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.Event.Phase.String()))
- if request.Event.GetOutputData() != nil {
- m.metrics.NodeExecutionOutputBytes.Observe(float64(proto.Size(request.Event.GetOutputData())))
+ m.metrics.NodeExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.GetEvent().GetPhase().String()))
+ if request.GetEvent().GetOutputData() != nil {
+ m.metrics.NodeExecutionOutputBytes.Observe(float64(proto.Size(request.GetEvent().GetOutputData())))
}
}
m.metrics.NodeExecutionEventsCreated.Inc()
if err := m.eventPublisher.Publish(ctx, proto.MessageName(request), request); err != nil {
m.metrics.PublishEventError.Inc()
- logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.RequestId, err)
+ logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.GetRequestId(), err)
}
go func() {
ceCtx := context.TODO()
if err := m.cloudEventPublisher.Publish(ceCtx, proto.MessageName(request), request); err != nil {
- logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.RequestId, err)
+ logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.GetRequestId(), err)
}
}()
@@ -299,15 +299,15 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request *adm
}
func (m *NodeExecutionManager) GetDynamicNodeWorkflow(ctx context.Context, request *admin.GetDynamicNodeWorkflowRequest) (*admin.DynamicNodeWorkflowResponse, error) {
- if err := validation.ValidateNodeExecutionIdentifier(request.Id); err != nil {
- logger.Debugf(ctx, "can't get node execution data with invalid identifier [%+v]: %v", request.Id, err)
+ if err := validation.ValidateNodeExecutionIdentifier(request.GetId()); err != nil {
+ logger.Debugf(ctx, "can't get node execution data with invalid identifier [%+v]: %v", request.GetId(), err)
}
- ctx = getNodeExecutionContext(ctx, request.Id)
- nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.Id)
+ ctx = getNodeExecutionContext(ctx, request.GetId())
+ nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.GetId())
if err != nil {
logger.Errorf(ctx, "failed to get node execution with id [%+v] with err %v",
- request.Id, err)
+ request.GetId(), err)
return nil, err
}
@@ -331,7 +331,7 @@ func (m *NodeExecutionManager) transformNodeExecutionModel(ctx context.Context,
if err != nil {
return nil, err
}
- if internalData.EventVersion == 0 {
+ if internalData.GetEventVersion() == 0 {
// Issue more expensive query to determine whether this node is a parent and/or dynamic node.
nodeExecutionModel, err = m.db.NodeExecutionRepo().GetWithChildren(ctx, repoInterfaces.NodeExecutionResource{
NodeExecutionIdentifier: nodeExecutionID,
@@ -370,17 +370,17 @@ func (m *NodeExecutionManager) transformNodeExecutionModelList(ctx context.Conte
func (m *NodeExecutionManager) GetNodeExecution(
ctx context.Context, request *admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) {
- if err := validation.ValidateNodeExecutionIdentifier(request.Id); err != nil {
- logger.Debugf(ctx, "get node execution called with invalid identifier [%+v]: %v", request.Id, err)
+ if err := validation.ValidateNodeExecutionIdentifier(request.GetId()); err != nil {
+ logger.Debugf(ctx, "get node execution called with invalid identifier [%+v]: %v", request.GetId(), err)
}
- ctx = getNodeExecutionContext(ctx, request.Id)
- nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.Id)
+ ctx = getNodeExecutionContext(ctx, request.GetId())
+ nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.GetId())
if err != nil {
logger.Debugf(ctx, "Failed to get node execution with id [%+v] with err %v",
- request.Id, err)
+ request.GetId(), err)
return nil, err
}
- nodeExecution, err := m.transformNodeExecutionModel(ctx, *nodeExecutionModel, request.Id, nil)
+ nodeExecution, err := m.transformNodeExecutionModel(ctx, *nodeExecutionModel, request.GetId(), nil)
if err != nil {
return nil, err
}
@@ -448,17 +448,17 @@ func (m *NodeExecutionManager) ListNodeExecutions(
if err := validation.ValidateNodeExecutionListRequest(request); err != nil {
return nil, err
}
- ctx = getExecutionContext(ctx, request.WorkflowExecutionId)
+ ctx = getExecutionContext(ctx, request.GetWorkflowExecutionId())
- identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.WorkflowExecutionId, common.NodeExecution)
+ identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.GetWorkflowExecutionId(), common.NodeExecution)
if err != nil {
return nil, err
}
var mapFilters []common.MapFilter
- if request.UniqueParentId != "" {
+ if request.GetUniqueParentId() != "" {
parentNodeExecution, err := util.GetNodeExecutionModel(ctx, m.db, &core.NodeExecutionIdentifier{
- ExecutionId: request.WorkflowExecutionId,
- NodeId: request.UniqueParentId,
+ ExecutionId: request.GetWorkflowExecutionId(),
+ NodeId: request.GetUniqueParentId(),
})
if err != nil {
return nil, err
@@ -475,7 +475,7 @@ func (m *NodeExecutionManager) ListNodeExecutions(
}
}
return m.listNodeExecutions(
- ctx, identifierFilters, request.Filters, request.Limit, request.Token, request.SortBy, mapFilters)
+ ctx, identifierFilters, request.GetFilters(), request.GetLimit(), request.GetToken(), request.GetSortBy(), mapFilters)
}
// Filters on node executions matching the execution parameters (execution project, domain, and name) as well as the
@@ -486,13 +486,13 @@ func (m *NodeExecutionManager) ListNodeExecutionsForTask(
if err := validation.ValidateNodeExecutionForTaskListRequest(request); err != nil {
return nil, err
}
- ctx = getTaskExecutionContext(ctx, request.TaskExecutionId)
+ ctx = getTaskExecutionContext(ctx, request.GetTaskExecutionId())
identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(
- ctx, request.TaskExecutionId.NodeExecutionId.ExecutionId, common.NodeExecution)
+ ctx, request.GetTaskExecutionId().GetNodeExecutionId().GetExecutionId(), common.NodeExecution)
if err != nil {
return nil, err
}
- parentTaskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.TaskExecutionId)
+ parentTaskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.GetTaskExecutionId())
if err != nil {
return nil, err
}
@@ -503,26 +503,26 @@ func (m *NodeExecutionManager) ListNodeExecutionsForTask(
}
identifierFilters = append(identifierFilters, nodeIDFilter)
return m.listNodeExecutions(
- ctx, identifierFilters, request.Filters, request.Limit, request.Token, request.SortBy, nil)
+ ctx, identifierFilters, request.GetFilters(), request.GetLimit(), request.GetToken(), request.GetSortBy(), nil)
}
func (m *NodeExecutionManager) GetNodeExecutionData(
ctx context.Context, request *admin.NodeExecutionGetDataRequest) (*admin.NodeExecutionGetDataResponse, error) {
- if err := validation.ValidateNodeExecutionIdentifier(request.Id); err != nil {
- logger.Debugf(ctx, "can't get node execution data with invalid identifier [%+v]: %v", request.Id, err)
+ if err := validation.ValidateNodeExecutionIdentifier(request.GetId()); err != nil {
+ logger.Debugf(ctx, "can't get node execution data with invalid identifier [%+v]: %v", request.GetId(), err)
}
- ctx = getNodeExecutionContext(ctx, request.Id)
- nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.Id)
+ ctx = getNodeExecutionContext(ctx, request.GetId())
+ nodeExecutionModel, err := util.GetNodeExecutionModel(ctx, m.db, request.GetId())
if err != nil {
logger.Debugf(ctx, "Failed to get node execution with id [%+v] with err %v",
- request.Id, err)
+ request.GetId(), err)
return nil, err
}
nodeExecution, err := transformers.FromNodeExecutionModel(*nodeExecutionModel, transformers.DefaultExecutionTransformerOptions)
if err != nil {
- logger.Debugf(ctx, "failed to transform node execution model [%+v] when fetching data: %v", request.Id, err)
+ logger.Debugf(ctx, "failed to transform node execution model [%+v] when fetching data: %v", request.GetId(), err)
return nil, err
}
@@ -532,7 +532,7 @@ func (m *NodeExecutionManager) GetNodeExecutionData(
group.Go(func() error {
var err error
inputs, inputURLBlob, err = util.GetInputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(),
- m.storageClient, nodeExecution.InputUri)
+ m.storageClient, nodeExecution.GetInputUri())
return err
})
@@ -541,7 +541,7 @@ func (m *NodeExecutionManager) GetNodeExecutionData(
group.Go(func() error {
var err error
outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(),
- m.storageClient, nodeExecution.Closure)
+ m.storageClient, nodeExecution.GetClosure())
return err
})
@@ -555,7 +555,7 @@ func (m *NodeExecutionManager) GetNodeExecutionData(
Outputs: outputURLBlob,
FullInputs: inputs,
FullOutputs: outputs,
- FlyteUrls: common.FlyteURLsFromNodeExecutionID(request.Id, nodeExecution.GetClosure() != nil && nodeExecution.GetClosure().GetDeckUri() != ""),
+ FlyteUrls: common.FlyteURLsFromNodeExecutionID(request.GetId(), nodeExecution.GetClosure() != nil && nodeExecution.GetClosure().GetDeckUri() != ""),
}
if len(nodeExecutionModel.DynamicWorkflowRemoteClosureReference) > 0 {
@@ -565,17 +565,17 @@ func (m *NodeExecutionManager) GetNodeExecutionData(
}
response.DynamicWorkflow = &admin.DynamicWorkflowNodeMetadata{
- Id: closure.Primary.Template.Id,
+ Id: closure.GetPrimary().GetTemplate().GetId(),
CompiledWorkflow: closure,
- DynamicJobSpecUri: nodeExecution.Closure.DynamicJobSpecUri,
+ DynamicJobSpecUri: nodeExecution.GetClosure().GetDynamicJobSpecUri(),
}
}
- m.metrics.NodeExecutionInputBytes.Observe(float64(response.Inputs.Bytes))
- if response.Outputs.Bytes > 0 {
- m.metrics.NodeExecutionOutputBytes.Observe(float64(response.Outputs.Bytes))
- } else if response.FullOutputs != nil {
- m.metrics.NodeExecutionOutputBytes.Observe(float64(proto.Size(response.FullOutputs)))
+ m.metrics.NodeExecutionInputBytes.Observe(float64(response.GetInputs().GetBytes()))
+ if response.GetOutputs().GetBytes() > 0 {
+ m.metrics.NodeExecutionOutputBytes.Observe(float64(response.GetOutputs().GetBytes()))
+ } else if response.GetFullOutputs() != nil {
+ m.metrics.NodeExecutionOutputBytes.Observe(float64(proto.Size(response.GetFullOutputs())))
}
return response, nil
@@ -588,9 +588,9 @@ func (m *NodeExecutionManager) fetchDynamicWorkflowClosure(ctx context.Context,
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "Unable to read WorkflowClosure from location %s : %v", ref, err)
}
- if wf := closure.Primary; wf == nil {
+ if wf := closure.GetPrimary(); wf == nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "Empty primary workflow definition in loaded dynamic workflow model.")
- } else if template := wf.Template; template == nil {
+ } else if template := wf.GetTemplate(); template == nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "Empty primary workflow template in loaded dynamic workflow model.")
}
return closure, nil
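
updateNodeExecutionWithEvent above enforces two invariants on incoming events: a phase that was already recorded is rejected, and a node already in a terminal phase never transitions again. A minimal sketch of that guard, with a hypothetical phase enum and terminal check standing in for core.NodeExecution_Phase and common.IsNodeExecutionTerminal:

package main

import "fmt"

type Phase int

const (
	Queued Phase = iota
	Running
	Succeeded
	Failed
)

func isTerminal(p Phase) bool { return p == Succeeded || p == Failed }

// validateTransition mirrors the two checks above: a repeated phase is reported
// as already recorded, and a terminal phase never transitions again.
func validateTransition(current, incoming Phase) error {
	if current == incoming {
		return fmt.Errorf("phase %v already recorded", incoming)
	}
	if isTerminal(current) {
		return fmt.Errorf("invalid phase change from %v to %v", current, incoming)
	}
	return nil
}

func main() {
	fmt.Println(validateTransition(Running, Running))   // already recorded
	fmt.Println(validateTransition(Succeeded, Running)) // invalid: terminal phase
	fmt.Println(validateTransition(Queued, Running))    // <nil>
}
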
diff --git a/flyteadmin/pkg/manager/impl/node_execution_manager_test.go b/flyteadmin/pkg/manager/impl/node_execution_manager_test.go
index b43c785b33..69a0203452 100644
--- a/flyteadmin/pkg/manager/impl/node_execution_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/node_execution_manager_test.go
@@ -71,7 +71,7 @@ var request = &admin.NodeExecutionEventRequest{
TargetMetadata: &event.NodeExecutionEvent_TaskNodeMetadata{
TaskNodeMetadata: &event.TaskNodeMetadata{
DynamicWorkflow: &event.DynamicWorkflowNodeMetadata{
- Id: dynamicWorkflowClosure.Primary.Template.Id,
+ Id: dynamicWorkflowClosure.GetPrimary().GetTemplate().GetId(),
CompiledWorkflow: dynamicWorkflowClosure,
},
},
@@ -131,7 +131,7 @@ func TestCreateNodeEvent(t *testing.T) {
return models.NodeExecution{}, flyteAdminErrors.NewFlyteAdminError(codes.NotFound, "foo")
})
expectedClosure := admin.NodeExecutionClosure{
- Phase: request.Event.Phase,
+ Phase: request.GetEvent().GetPhase(),
StartedAt: occurredAtProto,
CreatedAt: occurredAtProto,
UpdatedAt: occurredAtProto,
@@ -450,8 +450,8 @@ func TestTransformNodeExecutionModel(t *testing.T) {
}
nodeExecution, err := manager.transformNodeExecutionModel(ctx, models.NodeExecution{}, nodeExecID, transformers.DefaultExecutionTransformerOptions)
assert.NoError(t, err)
- assert.True(t, proto.Equal(nodeExecID, nodeExecution.Id))
- assert.True(t, nodeExecution.Metadata.IsParentNode)
+ assert.True(t, proto.Equal(nodeExecID, nodeExecution.GetId()))
+ assert.True(t, nodeExecution.GetMetadata().GetIsParentNode())
})
t.Run("event version > 0", func(t *testing.T) {
manager := NodeExecutionManager{
@@ -480,8 +480,8 @@ func TestTransformNodeExecutionModel(t *testing.T) {
InternalData: internalDataBytes,
}, nodeExecID, transformers.DefaultExecutionTransformerOptions)
assert.NoError(t, err)
- assert.True(t, nodeExecution.Metadata.IsParentNode)
- assert.True(t, nodeExecution.Metadata.IsDynamic)
+ assert.True(t, nodeExecution.GetMetadata().GetIsParentNode())
+ assert.True(t, nodeExecution.GetMetadata().GetIsDynamic())
})
t.Run("transform internal data err", func(t *testing.T) {
manager := NodeExecutionManager{
@@ -865,7 +865,7 @@ func TestListNodeExecutionsLevelZero(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Len(t, nodeExecutions.NodeExecutions, 1)
+ assert.Len(t, nodeExecutions.GetNodeExecutions(), 1)
assert.True(t, proto.Equal(&admin.NodeExecution{
Id: &core.NodeExecutionIdentifier{
NodeId: "node id",
@@ -878,8 +878,8 @@ func TestListNodeExecutionsLevelZero(t *testing.T) {
InputUri: "input uri",
Closure: &expectedClosure,
Metadata: &expectedMetadata,
- }, nodeExecutions.NodeExecutions[0]))
- assert.Equal(t, "3", nodeExecutions.Token)
+ }, nodeExecutions.GetNodeExecutions()[0]))
+ assert.Equal(t, "3", nodeExecutions.GetToken())
}
func TestListNodeExecutionsWithParent(t *testing.T) {
@@ -895,7 +895,7 @@ func TestListNodeExecutionsWithParent(t *testing.T) {
closureBytes, _ := proto.Marshal(&expectedClosure)
parentID := uint(12)
repository.NodeExecutionRepo().(*repositoryMocks.MockNodeExecutionRepo).SetGetCallback(func(ctx context.Context, input interfaces.NodeExecutionResource) (execution models.NodeExecution, e error) {
- assert.Equal(t, "parent_1", input.NodeExecutionIdentifier.NodeId)
+ assert.Equal(t, "parent_1", input.NodeExecutionIdentifier.GetNodeId())
return models.NodeExecution{
BaseModel: models.BaseModel{
ID: parentID,
@@ -966,7 +966,7 @@ func TestListNodeExecutionsWithParent(t *testing.T) {
UniqueParentId: "parent_1",
})
assert.Nil(t, err)
- assert.Len(t, nodeExecutions.NodeExecutions, 1)
+ assert.Len(t, nodeExecutions.GetNodeExecutions(), 1)
assert.True(t, proto.Equal(&admin.NodeExecution{
Id: &core.NodeExecutionIdentifier{
NodeId: "node id",
@@ -979,8 +979,8 @@ func TestListNodeExecutionsWithParent(t *testing.T) {
InputUri: "input uri",
Closure: &expectedClosure,
Metadata: &expectedMetadata,
- }, nodeExecutions.NodeExecutions[0]))
- assert.Equal(t, "3", nodeExecutions.Token)
+ }, nodeExecutions.GetNodeExecutions()[0]))
+ assert.Equal(t, "3", nodeExecutions.GetToken())
}
func TestListNodeExecutions_WithJoinTableFilter(t *testing.T) {
@@ -1089,7 +1089,7 @@ func TestListNodeExecutions_WithJoinTableFilter(t *testing.T) {
Filters: "eq(execution.phase, SUCCEEDED)",
})
assert.NoError(t, err)
- assert.Len(t, nodeExecutions.NodeExecutions, 1)
+ assert.Len(t, nodeExecutions.GetNodeExecutions(), 1)
assert.True(t, proto.Equal(&admin.NodeExecution{
Id: &core.NodeExecutionIdentifier{
NodeId: "node id",
@@ -1102,8 +1102,8 @@ func TestListNodeExecutions_WithJoinTableFilter(t *testing.T) {
InputUri: "input uri",
Closure: &expectedClosure,
Metadata: &expectedMetadata,
- }, nodeExecutions.NodeExecutions[0]))
- assert.Equal(t, "3", nodeExecutions.Token)
+ }, nodeExecutions.GetNodeExecutions()[0]))
+ assert.Equal(t, "3", nodeExecutions.GetToken())
}
func TestListNodeExecutions_InvalidParams(t *testing.T) {
@@ -1316,7 +1316,7 @@ func TestListNodeExecutionsForTask(t *testing.T) {
},
})
assert.Nil(t, err)
- assert.Len(t, nodeExecutions.NodeExecutions, 1)
+ assert.Len(t, nodeExecutions.GetNodeExecutions(), 1)
expectedMetadata := admin.NodeExecutionMetaData{
SpecNodeId: "spec-n1",
IsParentNode: true,
@@ -1333,8 +1333,8 @@ func TestListNodeExecutionsForTask(t *testing.T) {
InputUri: "input uri",
Closure: &expectedClosure,
Metadata: &expectedMetadata,
- }, nodeExecutions.NodeExecutions[0]))
- assert.Equal(t, "3", nodeExecutions.Token)
+ }, nodeExecutions.GetNodeExecutions()[0]))
+ assert.Equal(t, "3", nodeExecutions.GetToken())
}
func TestGetNodeExecutionData(t *testing.T) {
@@ -1439,7 +1439,7 @@ func TestGetNodeExecutionData(t *testing.T) {
FullInputs: fullInputs,
FullOutputs: fullOutputs,
DynamicWorkflow: &admin.DynamicWorkflowNodeMetadata{
- Id: dynamicWorkflowClosure.Primary.Template.Id,
+ Id: dynamicWorkflowClosure.GetPrimary().GetTemplate().GetId(),
CompiledWorkflow: dynamicWorkflowClosure,
},
FlyteUrls: &admin.FlyteURLs{
@@ -1465,7 +1465,7 @@ func Test_GetDynamicNodeWorkflow_Success(t *testing.T) {
return models.NodeExecution{DynamicWorkflowRemoteClosureReference: remoteClosureIdentifier}, nil
})
mockStorageClient := commonMocks.GetMockStorageClient()
- expectedClosure := testutils.GetWorkflowClosure().CompiledWorkflow
+ expectedClosure := testutils.GetWorkflowClosure().GetCompiledWorkflow()
mockStorageClient.ComposedProtobufStore.(*commonMocks.TestDataStore).ReadProtobufCb = func(ctx context.Context, reference storage.DataReference, msg proto.Message) error {
assert.Equal(t, remoteClosureIdentifier, reference.String())
bytes, err := proto.Marshal(expectedClosure)
diff --git a/flyteadmin/pkg/manager/impl/project_manager.go b/flyteadmin/pkg/manager/impl/project_manager.go
index a1ac99b412..a19b61ca01 100644
--- a/flyteadmin/pkg/manager/impl/project_manager.go
+++ b/flyteadmin/pkg/manager/impl/project_manager.go
@@ -33,7 +33,7 @@ func (m *ProjectManager) CreateProject(ctx context.Context, request *admin.Proje
if err := validation.ValidateProjectRegisterRequest(request); err != nil {
return nil, err
}
- projectModel := transformers.CreateProjectModel(request.Project)
+ projectModel := transformers.CreateProjectModel(request.GetProject())
err := m.db.ProjectRepo().Create(ctx, projectModel)
if err != nil {
return nil, err
@@ -44,14 +44,14 @@ func (m *ProjectManager) CreateProject(ctx context.Context, request *admin.Proje
func (m *ProjectManager) ListProjects(ctx context.Context, request *admin.ProjectListRequest) (*admin.Projects, error) {
spec := util.FilterSpec{
- RequestFilters: request.Filters,
+ RequestFilters: request.GetFilters(),
}
filters, err := util.GetDbFilters(spec, common.Project)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.ProjectColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.ProjectColumns)
if err != nil {
return nil, err
}
@@ -59,14 +59,14 @@ func (m *ProjectManager) ListProjects(ctx context.Context, request *admin.Projec
sortParameter = alphabeticalSortParam
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListProjects", request.Token)
+ "invalid pagination token %s for ListProjects", request.GetToken())
}
// And finally, query the database
listProjectsInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
@@ -75,10 +75,10 @@ func (m *ProjectManager) ListProjects(ctx context.Context, request *admin.Projec
if err != nil {
return nil, err
}
- projects := transformers.FromProjectModels(projectModels, m.GetDomains(ctx, &admin.GetDomainRequest{}).Domains)
+ projects := transformers.FromProjectModels(projectModels, m.GetDomains(ctx, &admin.GetDomainRequest{}).GetDomains())
var token string
- if len(projects) == int(request.Limit) {
+ if len(projects) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(projects))
}
@@ -93,7 +93,7 @@ func (m *ProjectManager) UpdateProject(ctx context.Context, projectUpdate *admin
projectRepo := m.db.ProjectRepo()
// Fetch the existing project if exists. If not, return err and do not update.
- _, err := projectRepo.Get(ctx, projectUpdate.Id)
+ _, err := projectRepo.Get(ctx, projectUpdate.GetId())
if err != nil {
return nil, err
}
@@ -118,11 +118,11 @@ func (m *ProjectManager) GetProject(ctx context.Context, request *admin.ProjectG
if err := validation.ValidateProjectGetRequest(request); err != nil {
return nil, err
}
- projectModel, err := m.db.ProjectRepo().Get(ctx, request.Id)
+ projectModel, err := m.db.ProjectRepo().Get(ctx, request.GetId())
if err != nil {
return nil, err
}
- projectResponse := transformers.FromProjectModel(projectModel, m.GetDomains(ctx, &admin.GetDomainRequest{}).Domains)
+ projectResponse := transformers.FromProjectModel(projectModel, m.GetDomains(ctx, &admin.GetDomainRequest{}).GetDomains())
return projectResponse, nil
}
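
ListProjects above reuses the same offset-token paging and, when the request carries no sort key (the guard sits outside this hunk), falls back to an alphabetical ordering. A tiny sketch of that fallback with a plain comparator standing in for the repository sort parameter:

package main

import (
	"fmt"
	"sort"
)

// Hypothetical stand-in for the repository sort parameter: just a comparator here.
type sortParam func(a, b string) bool

// chooseSort mirrors the fallback above: an explicit sort key wins, otherwise
// projects are listed alphabetically by name.
func chooseSort(requested sortParam) sortParam {
	if requested != nil {
		return requested
	}
	return func(a, b string) bool { return a < b } // alphabeticalSortParam stand-in
}

func main() {
	names := []string{"flytesnacks", "alpha", "ml-platform"}
	less := chooseSort(nil)
	sort.Slice(names, func(i, j int) bool { return less(names[i], names[j]) })
	fmt.Println(names) // [alpha flytesnacks ml-platform]
}
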
diff --git a/flyteadmin/pkg/manager/impl/project_manager_test.go b/flyteadmin/pkg/manager/impl/project_manager_test.go
index 38117a7ec9..42bf93cafb 100644
--- a/flyteadmin/pkg/manager/impl/project_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/project_manager_test.go
@@ -70,11 +70,11 @@ func testListProjects(request *admin.ProjectListRequest, token string, orderExpr
resp, err := projectManager.ListProjects(context.Background(), request)
assert.NoError(t, err)
- assert.Len(t, resp.Projects, 1)
+ assert.Len(t, resp.GetProjects(), 1)
assert.Equal(t, token, resp.GetToken())
- assert.Len(t, resp.Projects[0].Domains, 4)
- for _, domain := range resp.Projects[0].Domains {
- assert.Contains(t, testDomainsForProjManager, domain.Id)
+ assert.Len(t, resp.GetProjects()[0].GetDomains(), 4)
+ for _, domain := range resp.GetProjects()[0].GetDomains() {
+ assert.Contains(t, testDomainsForProjManager, domain.GetId())
}
}
@@ -300,10 +300,10 @@ func TestProjectManager_TestGetProject(t *testing.T) {
resp, _ := projectManager.GetProject(context.Background(),
mockedProject)
- assert.Equal(t, mockedProject.Id, resp.Id)
- assert.Equal(t, "a-mocked-project", resp.Name)
- assert.Equal(t, "A mocked project", resp.Description)
- assert.Equal(t, admin.Project_ProjectState(0), resp.State)
+ assert.Equal(t, mockedProject.GetId(), resp.GetId())
+ assert.Equal(t, "a-mocked-project", resp.GetName())
+ assert.Equal(t, "A mocked project", resp.GetDescription())
+ assert.Equal(t, admin.Project_ProjectState(0), resp.GetState())
}
func TestProjectManager_TestGetProject_ErrorDueToProjectNotFound(t *testing.T) {
diff --git a/flyteadmin/pkg/manager/impl/resources/resource_manager.go b/flyteadmin/pkg/manager/impl/resources/resource_manager.go
index b1304930cf..4dad45d987 100644
--- a/flyteadmin/pkg/manager/impl/resources/resource_manager.go
+++ b/flyteadmin/pkg/manager/impl/resources/resource_manager.go
@@ -76,7 +76,7 @@ func (m *ResourceManager) createOrMergeUpdateWorkflowAttributes(
return nil, err
}
updatedModel, err := transformers.MergeUpdateWorkflowAttributes(
- ctx, existing, resourceType, &resourceID, request.Attributes)
+ ctx, existing, resourceType, &resourceID, request.GetAttributes())
if err != nil {
return nil, err
}
@@ -96,11 +96,11 @@ func (m *ResourceManager) UpdateWorkflowAttributes(
return nil, err
}
- model, err := transformers.WorkflowAttributesToResourceModel(request.Attributes, resource)
+ model, err := transformers.WorkflowAttributesToResourceModel(request.GetAttributes(), resource)
if err != nil {
return nil, err
}
- if request.Attributes.GetMatchingAttributes().GetPluginOverrides() != nil {
+ if request.GetAttributes().GetMatchingAttributes().GetPluginOverrides() != nil {
return m.createOrMergeUpdateWorkflowAttributes(ctx, request, model, admin.MatchableResource_PLUGIN_OVERRIDE)
}
err = m.db.ResourceRepo().CreateOrUpdate(ctx, model)
@@ -118,7 +118,7 @@ func (m *ResourceManager) GetWorkflowAttributes(
return nil, err
}
workflowAttributesModel, err := m.db.ResourceRepo().Get(
- ctx, repo_interface.ResourceID{Project: request.Project, Domain: request.Domain, Workflow: request.Workflow, ResourceType: request.ResourceType.String()})
+ ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: request.GetDomain(), Workflow: request.GetWorkflow(), ResourceType: request.GetResourceType().String()})
if err != nil {
return nil, err
}
@@ -137,11 +137,11 @@ func (m *ResourceManager) DeleteWorkflowAttributes(ctx context.Context,
return nil, err
}
if err := m.db.ResourceRepo().Delete(
- ctx, repo_interface.ResourceID{Project: request.Project, Domain: request.Domain, Workflow: request.Workflow, ResourceType: request.ResourceType.String()}); err != nil {
+ ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: request.GetDomain(), Workflow: request.GetWorkflow(), ResourceType: request.GetResourceType().String()}); err != nil {
return nil, err
}
- logger.Infof(ctx, "Deleted workflow attributes for: %s-%s-%s (%s)", request.Project,
- request.Domain, request.Workflow, request.ResourceType.String())
+ logger.Infof(ctx, "Deleted workflow attributes for: %s-%s-%s (%s)", request.GetProject(),
+ request.GetDomain(), request.GetWorkflow(), request.GetResourceType().String())
return &admin.WorkflowAttributesDeleteResponse{}, nil
}
@@ -154,12 +154,12 @@ func (m *ResourceManager) UpdateProjectAttributes(ctx context.Context, request *
if resource, err = validation.ValidateProjectAttributesUpdateRequest(ctx, m.db, request); err != nil {
return nil, err
}
- model, err := transformers.ProjectAttributesToResourceModel(request.Attributes, resource)
+ model, err := transformers.ProjectAttributesToResourceModel(request.GetAttributes(), resource)
if err != nil {
return nil, err
}
- if request.Attributes.GetMatchingAttributes().GetPluginOverrides() != nil {
+ if request.GetAttributes().GetMatchingAttributes().GetPluginOverrides() != nil {
return m.createOrMergeUpdateProjectAttributes(ctx, request, model, admin.MatchableResource_PLUGIN_OVERRIDE)
}
@@ -174,12 +174,12 @@ func (m *ResourceManager) UpdateProjectAttributes(ctx context.Context, request *
func (m *ResourceManager) GetProjectAttributesBase(ctx context.Context, request *admin.ProjectAttributesGetRequest) (
*admin.ProjectAttributesGetResponse, error) {
- if err := validation.ValidateProjectExists(ctx, m.db, request.Project); err != nil {
+ if err := validation.ValidateProjectExists(ctx, m.db, request.GetProject()); err != nil {
return nil, err
}
projectAttributesModel, err := m.db.ResourceRepo().GetProjectLevel(
- ctx, repo_interface.ResourceID{Project: request.Project, Domain: "", ResourceType: request.ResourceType.String()})
+ ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: "", ResourceType: request.GetResourceType().String()})
if err != nil {
return nil, err
}
@@ -191,8 +191,8 @@ func (m *ResourceManager) GetProjectAttributesBase(ctx context.Context, request
return &admin.ProjectAttributesGetResponse{
Attributes: &admin.ProjectAttributes{
- Project: request.Project,
- MatchingAttributes: ma.Attributes,
+ Project: request.GetProject(),
+ MatchingAttributes: ma.GetAttributes(),
},
}, nil
}
@@ -208,11 +208,11 @@ func (m *ResourceManager) GetProjectAttributes(ctx context.Context, request *adm
configLevelDefaults := m.config.GetTopLevelConfig().GetAsWorkflowExecutionConfig()
if err != nil {
ec, ok := err.(errors.FlyteAdminError)
- if ok && ec.Code() == codes.NotFound && request.ResourceType == admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG {
+ if ok && ec.Code() == codes.NotFound && request.GetResourceType() == admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG {
// TODO: Will likely be removed after overarching settings project is done
return &admin.ProjectAttributesGetResponse{
Attributes: &admin.ProjectAttributes{
- Project: request.Project,
+ Project: request.GetProject(),
MatchingAttributes: &admin.MatchingAttributes{
Target: &admin.MatchingAttributes_WorkflowExecutionConfig{
WorkflowExecutionConfig: configLevelDefaults,
@@ -227,14 +227,14 @@ func (m *ResourceManager) GetProjectAttributes(ctx context.Context, request *adm
// If found, then merge result with the default values for the platform
// TODO: Remove this logic once the overarching settings project is done. Those endpoints should take
// default configuration into account.
- responseAttributes := getResponse.Attributes.GetMatchingAttributes().GetWorkflowExecutionConfig()
+ responseAttributes := getResponse.GetAttributes().GetMatchingAttributes().GetWorkflowExecutionConfig()
if responseAttributes != nil {
logger.Warningf(ctx, "Merging response %s with defaults %s", responseAttributes, configLevelDefaults)
tmp := util.MergeIntoExecConfig(responseAttributes, configLevelDefaults)
responseAttributes = tmp
return &admin.ProjectAttributesGetResponse{
Attributes: &admin.ProjectAttributes{
- Project: request.Project,
+ Project: request.GetProject(),
MatchingAttributes: &admin.MatchingAttributes{
Target: &admin.MatchingAttributes_WorkflowExecutionConfig{
WorkflowExecutionConfig: responseAttributes,
@@ -250,14 +250,14 @@ func (m *ResourceManager) GetProjectAttributes(ctx context.Context, request *adm
func (m *ResourceManager) DeleteProjectAttributes(ctx context.Context, request *admin.ProjectAttributesDeleteRequest) (
*admin.ProjectAttributesDeleteResponse, error) {
- if err := validation.ValidateProjectForUpdate(ctx, m.db, request.Project); err != nil {
+ if err := validation.ValidateProjectForUpdate(ctx, m.db, request.GetProject()); err != nil {
return nil, err
}
if err := m.db.ResourceRepo().Delete(
- ctx, repo_interface.ResourceID{Project: request.Project, ResourceType: request.ResourceType.String()}); err != nil {
+ ctx, repo_interface.ResourceID{Project: request.GetProject(), ResourceType: request.GetResourceType().String()}); err != nil {
return nil, err
}
- logger.Infof(ctx, "Deleted project attributes for: %s-%s (%s)", request.Project, request.ResourceType.String())
+ logger.Infof(ctx, "Deleted project attributes for: %s-%s (%s)", request.GetProject(), request.GetResourceType().String())
return &admin.ProjectAttributesDeleteResponse{}, nil
}
@@ -285,7 +285,7 @@ func (m *ResourceManager) createOrMergeUpdateProjectDomainAttributes(
return nil, err
}
updatedModel, err := transformers.MergeUpdatePluginAttributes(
- ctx, existing, resourceType, &resourceID, request.Attributes.MatchingAttributes)
+ ctx, existing, resourceType, &resourceID, request.GetAttributes().GetMatchingAttributes())
if err != nil {
return nil, err
}
@@ -321,7 +321,7 @@ func (m *ResourceManager) createOrMergeUpdateProjectAttributes(
return nil, err
}
updatedModel, err := transformers.MergeUpdatePluginAttributes(
- ctx, existing, resourceType, &resourceID, request.Attributes.MatchingAttributes)
+ ctx, existing, resourceType, &resourceID, request.GetAttributes().GetMatchingAttributes())
if err != nil {
return nil, err
}
@@ -340,13 +340,13 @@ func (m *ResourceManager) UpdateProjectDomainAttributes(
if resource, err = validation.ValidateProjectDomainAttributesUpdateRequest(ctx, m.db, m.config, request); err != nil {
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Attributes.Project, request.Attributes.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetAttributes().GetProject(), request.GetAttributes().GetDomain())
- model, err := transformers.ProjectDomainAttributesToResourceModel(request.Attributes, resource)
+ model, err := transformers.ProjectDomainAttributesToResourceModel(request.GetAttributes(), resource)
if err != nil {
return nil, err
}
- if request.Attributes.GetMatchingAttributes().GetPluginOverrides() != nil {
+ if request.GetAttributes().GetMatchingAttributes().GetPluginOverrides() != nil {
return m.createOrMergeUpdateProjectDomainAttributes(ctx, request, model, admin.MatchableResource_PLUGIN_OVERRIDE)
}
err = m.db.ResourceRepo().CreateOrUpdate(ctx, model)
@@ -363,7 +363,7 @@ func (m *ResourceManager) GetProjectDomainAttributes(
return nil, err
}
projectAttributesModel, err := m.db.ResourceRepo().Get(
- ctx, repo_interface.ResourceID{Project: request.Project, Domain: request.Domain, ResourceType: request.ResourceType.String()})
+ ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: request.GetDomain(), ResourceType: request.GetResourceType().String()})
if err != nil {
return nil, err
}
@@ -382,11 +382,11 @@ func (m *ResourceManager) DeleteProjectDomainAttributes(ctx context.Context,
return nil, err
}
if err := m.db.ResourceRepo().Delete(
- ctx, repo_interface.ResourceID{Project: request.Project, Domain: request.Domain, ResourceType: request.ResourceType.String()}); err != nil {
+ ctx, repo_interface.ResourceID{Project: request.GetProject(), Domain: request.GetDomain(), ResourceType: request.GetResourceType().String()}); err != nil {
return nil, err
}
- logger.Infof(ctx, "Deleted project-domain attributes for: %s-%s (%s)", request.Project,
- request.Domain, request.ResourceType.String())
+ logger.Infof(ctx, "Deleted project-domain attributes for: %s-%s (%s)", request.GetProject(),
+ request.GetDomain(), request.GetResourceType().String())
return &admin.ProjectDomainAttributesDeleteResponse{}, nil
}
@@ -395,7 +395,7 @@ func (m *ResourceManager) ListAll(ctx context.Context, request *admin.ListMatcha
if err := validation.ValidateListAllMatchableAttributesRequest(request); err != nil {
return nil, err
}
- resources, err := m.db.ResourceRepo().ListAll(ctx, request.ResourceType.String())
+ resources, err := m.db.ResourceRepo().ListAll(ctx, request.GetResourceType().String())
if err != nil {
return nil, err
}
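
GetProjectAttributes above falls back to the platform-level workflow execution config when nothing is stored and otherwise merges the stored attributes into those defaults via util.MergeIntoExecConfig. A rough sketch of the assumed precedence (stored values win where set, defaults fill the rest), using a hypothetical two-field config instead of the real admin.WorkflowExecutionConfig:

package main

import "fmt"

// Hypothetical, simplified config: zero values mean "not set".
type execConfig struct {
	MaxParallelism int
	RawOutputData  string
}

// mergeIntoDefaults applies the precedence assumed above: a stored value
// overrides the platform default only when it is actually set.
func mergeIntoDefaults(stored, defaults execConfig) execConfig {
	merged := defaults
	if stored.MaxParallelism != 0 {
		merged.MaxParallelism = stored.MaxParallelism
	}
	if stored.RawOutputData != "" {
		merged.RawOutputData = stored.RawOutputData
	}
	return merged
}

func main() {
	defaults := execConfig{MaxParallelism: 25, RawOutputData: "s3://platform-default"}

	// Nothing stored for the project: the defaults are returned as-is.
	fmt.Println(mergeIntoDefaults(execConfig{}, defaults)) // {25 s3://platform-default}

	// A stored project-level override wins for the fields it sets.
	stored := execConfig{MaxParallelism: 4}
	fmt.Println(mergeIntoDefaults(stored, defaults)) // {4 s3://platform-default}
}
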
diff --git a/flyteadmin/pkg/manager/impl/resources/resource_manager_test.go b/flyteadmin/pkg/manager/impl/resources/resource_manager_test.go
index 8352de1d7b..be03d642ab 100644
--- a/flyteadmin/pkg/manager/impl/resources/resource_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/resources/resource_manager_test.go
@@ -83,8 +83,8 @@ func TestUpdateWorkflowAttributes_CreateOrMerge(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 1)
- assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().Overrides[0], &admin.PluginOverride{
+ assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 1)
+ assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().GetOverrides()[0], &admin.PluginOverride{
TaskType: "python",
PluginId: []string{"plugin a"}}))
@@ -127,14 +127,14 @@ func TestUpdateWorkflowAttributes_CreateOrMerge(t *testing.T) {
t.Fatal(err)
}
- assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 2)
- for _, override := range attributesToBeSaved.GetPluginOverrides().Overrides {
- if override.TaskType == python {
- assert.EqualValues(t, []string{"plugin a"}, override.PluginId)
- } else if override.TaskType == hive {
- assert.EqualValues(t, []string{"plugin b"}, override.PluginId)
+ assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 2)
+ for _, override := range attributesToBeSaved.GetPluginOverrides().GetOverrides() {
+ if override.GetTaskType() == python {
+ assert.EqualValues(t, []string{"plugin a"}, override.GetPluginId())
+ } else if override.GetTaskType() == hive {
+ assert.EqualValues(t, []string{"plugin b"}, override.GetPluginId())
} else {
- t.Errorf("Unexpected task type [%s] plugin override committed to db", override.TaskType)
+ t.Errorf("Unexpected task type [%s] plugin override committed to db", override.GetTaskType())
}
}
createOrUpdateCalled = true
@@ -256,8 +256,8 @@ func TestUpdateProjectDomainAttributes_CreateOrMerge(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 1)
- assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().Overrides[0], &admin.PluginOverride{
+ assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 1)
+ assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().GetOverrides()[0], &admin.PluginOverride{
TaskType: python,
PluginId: []string{"plugin a"}}))
@@ -298,14 +298,14 @@ func TestUpdateProjectDomainAttributes_CreateOrMerge(t *testing.T) {
t.Fatal(err)
}
- assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 2)
- for _, override := range attributesToBeSaved.GetPluginOverrides().Overrides {
- if override.TaskType == python {
- assert.EqualValues(t, []string{"plugin a"}, override.PluginId)
- } else if override.TaskType == hive {
- assert.EqualValues(t, []string{"plugin b"}, override.PluginId)
+ assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 2)
+ for _, override := range attributesToBeSaved.GetPluginOverrides().GetOverrides() {
+ if override.GetTaskType() == python {
+ assert.EqualValues(t, []string{"plugin a"}, override.GetPluginId())
+ } else if override.GetTaskType() == hive {
+ assert.EqualValues(t, []string{"plugin b"}, override.GetPluginId())
} else {
- t.Errorf("Unexpected task type [%s] plugin override committed to db", override.TaskType)
+ t.Errorf("Unexpected task type [%s] plugin override committed to db", override.GetTaskType())
}
}
createOrUpdateCalled = true
@@ -439,8 +439,8 @@ func TestUpdateProjectAttributes_CreateOrMerge(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 1)
- assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().Overrides[0], &admin.PluginOverride{
+ assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 1)
+ assert.True(t, proto.Equal(attributesToBeSaved.GetPluginOverrides().GetOverrides()[0], &admin.PluginOverride{
TaskType: python,
PluginId: []string{"plugin a"}}))
@@ -480,14 +480,14 @@ func TestUpdateProjectAttributes_CreateOrMerge(t *testing.T) {
t.Fatal(err)
}
- assert.Len(t, attributesToBeSaved.GetPluginOverrides().Overrides, 2)
- for _, override := range attributesToBeSaved.GetPluginOverrides().Overrides {
- if override.TaskType == python {
- assert.EqualValues(t, []string{"plugin a"}, override.PluginId)
- } else if override.TaskType == hive {
- assert.EqualValues(t, []string{"plugin b"}, override.PluginId)
+ assert.Len(t, attributesToBeSaved.GetPluginOverrides().GetOverrides(), 2)
+ for _, override := range attributesToBeSaved.GetPluginOverrides().GetOverrides() {
+ if override.GetTaskType() == python {
+ assert.EqualValues(t, []string{"plugin a"}, override.GetPluginId())
+ } else if override.GetTaskType() == hive {
+ assert.EqualValues(t, []string{"plugin b"}, override.GetPluginId())
} else {
- t.Errorf("Unexpected task type [%s] plugin override committed to db", override.TaskType)
+ t.Errorf("Unexpected task type [%s] plugin override committed to db", override.GetTaskType())
}
}
createOrUpdateCalled = true
@@ -763,16 +763,16 @@ func TestListAllResources(t *testing.T) {
ResourceType: admin.MatchableResource_CLUSTER_RESOURCE,
})
assert.Nil(t, err)
- assert.NotNil(t, response.Configurations)
- assert.Len(t, response.Configurations, 2)
+ assert.NotNil(t, response.GetConfigurations())
+ assert.Len(t, response.GetConfigurations(), 2)
assert.True(t, proto.Equal(&admin.MatchableAttributesConfiguration{
Project: "projectA",
Attributes: &projectAttributes,
- }, response.Configurations[0]))
+ }, response.GetConfigurations()[0]))
assert.True(t, proto.Equal(&admin.MatchableAttributesConfiguration{
Project: "projectB",
Domain: "development",
Workflow: "workflow",
Attributes: &workflowAttributes,
- }, response.Configurations[1]))
+ }, response.GetConfigurations()[1]))
}
diff --git a/flyteadmin/pkg/manager/impl/signal_manager.go b/flyteadmin/pkg/manager/impl/signal_manager.go
index f98edae674..7da9dd5f68 100644
--- a/flyteadmin/pkg/manager/impl/signal_manager.go
+++ b/flyteadmin/pkg/manager/impl/signal_manager.go
@@ -33,9 +33,9 @@ type SignalManager struct {
}
func getSignalContext(ctx context.Context, identifier *core.SignalIdentifier) context.Context {
- ctx = contextutils.WithProjectDomain(ctx, identifier.ExecutionId.Project, identifier.ExecutionId.Domain)
- ctx = contextutils.WithWorkflowID(ctx, identifier.ExecutionId.Name)
- return contextutils.WithSignalID(ctx, identifier.SignalId)
+ ctx = contextutils.WithProjectDomain(ctx, identifier.GetExecutionId().GetProject(), identifier.GetExecutionId().GetDomain())
+ ctx = contextutils.WithWorkflowID(ctx, identifier.GetExecutionId().GetName())
+ return contextutils.WithSignalID(ctx, identifier.GetSignalId())
}
func (s *SignalManager) GetOrCreateSignal(ctx context.Context, request *admin.SignalGetOrCreateRequest) (*admin.Signal, error) {
@@ -43,11 +43,11 @@ func (s *SignalManager) GetOrCreateSignal(ctx context.Context, request *admin.Si
logger.Debugf(ctx, "invalid request [%+v]: %v", request, err)
return nil, err
}
- ctx = getSignalContext(ctx, request.Id)
+ ctx = getSignalContext(ctx, request.GetId())
- signalModel, err := transformers.CreateSignalModel(request.Id, request.Type, nil)
+ signalModel, err := transformers.CreateSignalModel(request.GetId(), request.GetType(), nil)
if err != nil {
- logger.Errorf(ctx, "Failed to transform signal with id [%+v] and type [+%v] with err: %v", request.Id, request.Type, err)
+ logger.Errorf(ctx, "Failed to transform signal with id [%+v] and type [+%v] with err: %v", request.GetId(), request.GetType(), err)
return nil, err
}
@@ -70,33 +70,33 @@ func (s *SignalManager) ListSignals(ctx context.Context, request *admin.SignalLi
logger.Debugf(ctx, "ListSignals request [%+v] is invalid: %v", request, err)
return nil, err
}
- ctx = getExecutionContext(ctx, request.WorkflowExecutionId)
+ ctx = getExecutionContext(ctx, request.GetWorkflowExecutionId())
- identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.WorkflowExecutionId, common.Signal)
+ identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.GetWorkflowExecutionId(), common.Signal)
if err != nil {
return nil, err
}
- filters, err := util.AddRequestFilters(request.Filters, common.Signal, identifierFilters)
+ filters, err := util.AddRequestFilters(request.GetFilters(), common.Signal, identifierFilters)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.SignalColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.SignalColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListSignals", request.Token)
+ "invalid pagination token %s for ListSignals", request.GetToken())
}
signalModelList, err := s.db.SignalRepo().List(ctx, repoInterfaces.ListResourceInput{
InlineFilters: filters,
Offset: offset,
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
SortParameter: sortParameter,
})
if err != nil {
@@ -111,7 +111,7 @@ func (s *SignalManager) ListSignals(ctx context.Context, request *admin.SignalLi
return nil, err
}
var token string
- if len(signalList) == int(request.Limit) {
+ if len(signalList) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(signalList))
}
return &admin.SignalList{
@@ -124,11 +124,11 @@ func (s *SignalManager) SetSignal(ctx context.Context, request *admin.SignalSetR
if err := validation.ValidateSignalSetRequest(ctx, s.db, request); err != nil {
return nil, err
}
- ctx = getSignalContext(ctx, request.Id)
+ ctx = getSignalContext(ctx, request.GetId())
- signalModel, err := transformers.CreateSignalModel(request.Id, nil, request.Value)
+ signalModel, err := transformers.CreateSignalModel(request.GetId(), nil, request.GetValue())
if err != nil {
- logger.Errorf(ctx, "Failed to transform signal with id [%+v] and value [+%v] with err: %v", request.Id, request.Value, err)
+ logger.Errorf(ctx, "Failed to transform signal with id [%+v] and value [+%v] with err: %v", request.GetId(), request.GetValue(), err)
return nil, err
}
diff --git a/flyteadmin/pkg/manager/impl/task_execution_manager.go b/flyteadmin/pkg/manager/impl/task_execution_manager.go
index f8b8e12e21..801d612ab2 100644
--- a/flyteadmin/pkg/manager/impl/task_execution_manager.go
+++ b/flyteadmin/pkg/manager/impl/task_execution_manager.go
@@ -56,15 +56,15 @@ type TaskExecutionManager struct {
}
func getTaskExecutionContext(ctx context.Context, identifier *core.TaskExecutionIdentifier) context.Context {
- ctx = getNodeExecutionContext(ctx, identifier.NodeExecutionId)
- return contextutils.WithTaskID(ctx, fmt.Sprintf("%s-%v", identifier.TaskId.Name, identifier.RetryAttempt))
+ ctx = getNodeExecutionContext(ctx, identifier.GetNodeExecutionId())
+ return contextutils.WithTaskID(ctx, fmt.Sprintf("%s-%v", identifier.GetTaskId().GetName(), identifier.GetRetryAttempt()))
}
func (m *TaskExecutionManager) createTaskExecution(
ctx context.Context, request *admin.TaskExecutionEventRequest) (
models.TaskExecution, error) {
- nodeExecutionID := request.Event.ParentNodeExecutionId
+ nodeExecutionID := request.GetEvent().GetParentNodeExecutionId()
nodeExecutionExists, err := m.db.NodeExecutionRepo().Exists(ctx, repoInterfaces.NodeExecutionResource{
NodeExecutionIdentifier: nodeExecutionID,
})
@@ -88,19 +88,19 @@ func (m *TaskExecutionManager) createTaskExecution(
StorageClient: m.storageClient,
})
if err != nil {
- logger.Debugf(ctx, "failed to transform task execution %+v into database model: %v", request.Event.TaskId, err)
+ logger.Debugf(ctx, "failed to transform task execution %+v into database model: %v", request.GetEvent().GetTaskId(), err)
return models.TaskExecution{}, err
}
if err := m.db.TaskExecutionRepo().Create(ctx, *taskExecutionModel); err != nil {
logger.Debugf(ctx, "Failed to create task execution with task id [%+v] with err %v",
- request.Event.TaskId, err)
+ request.GetEvent().GetTaskId(), err)
return models.TaskExecution{}, err
}
m.metrics.TaskExecutionsCreated.Inc()
m.metrics.ClosureSizeBytes.Observe(float64(len(taskExecutionModel.Closure)))
- logger.Debugf(ctx, "created task execution: %+v", request.Event.TaskId)
+ logger.Debugf(ctx, "created task execution: %+v", request.GetEvent().GetTaskId())
return *taskExecutionModel, nil
}
@@ -111,14 +111,14 @@ func (m *TaskExecutionManager) updateTaskExecutionModelState(
err := transformers.UpdateTaskExecutionModel(ctx, request, existingTaskExecution,
m.config.ApplicationConfiguration().GetRemoteDataConfig().InlineEventDataPolicy, m.storageClient)
if err != nil {
- logger.Debugf(ctx, "failed to update task execution model [%+v] with err: %v", request.Event.TaskId, err)
+ logger.Debugf(ctx, "failed to update task execution model [%+v] with err: %v", request.GetEvent().GetTaskId(), err)
return models.TaskExecution{}, err
}
err = m.db.TaskExecutionRepo().Update(ctx, *existingTaskExecution)
if err != nil {
logger.Debugf(ctx, "Failed to update task execution with task id [%+v] and task execution model [%+v] with err %v",
- request.Event.TaskId, existingTaskExecution, err)
+ request.GetEvent().GetTaskId(), existingTaskExecution, err)
return models.TaskExecution{}, err
}
@@ -132,20 +132,20 @@ func (m *TaskExecutionManager) CreateTaskExecutionEvent(ctx context.Context, req
return nil, err
}
- if err := validation.ValidateClusterForExecutionID(ctx, m.db, request.Event.ParentNodeExecutionId.ExecutionId, request.Event.ProducerId); err != nil {
+ if err := validation.ValidateClusterForExecutionID(ctx, m.db, request.GetEvent().GetParentNodeExecutionId().GetExecutionId(), request.GetEvent().GetProducerId()); err != nil {
return nil, err
}
// Get the parent node execution, if none found a MissingEntityError will be returned
- nodeExecutionID := request.Event.ParentNodeExecutionId
+ nodeExecutionID := request.GetEvent().GetParentNodeExecutionId()
taskExecutionID := &core.TaskExecutionIdentifier{
- TaskId: request.Event.TaskId,
+ TaskId: request.GetEvent().GetTaskId(),
NodeExecutionId: nodeExecutionID,
- RetryAttempt: request.Event.RetryAttempt,
+ RetryAttempt: request.GetEvent().GetRetryAttempt(),
}
ctx = getTaskExecutionContext(ctx, taskExecutionID)
logger.Debugf(ctx, "Received task execution event for [%+v] transitioning to phase [%v]",
- taskExecutionID, request.Event.Phase)
+ taskExecutionID, request.GetEvent().GetPhase())
// See if the task execution exists
// - if it does check if the new phase is applicable and then update
@@ -166,20 +166,20 @@ func (m *TaskExecutionManager) CreateTaskExecutionEvent(ctx context.Context, req
return &admin.TaskExecutionEventResponse{}, nil
}
- if taskExecutionModel.Phase == request.Event.Phase.String() &&
- taskExecutionModel.PhaseVersion >= request.Event.PhaseVersion {
+ if taskExecutionModel.Phase == request.GetEvent().GetPhase().String() &&
+ taskExecutionModel.PhaseVersion >= request.GetEvent().GetPhaseVersion() {
logger.Debugf(ctx, "have already recorded task execution phase %s (version: %d) for %v",
- request.Event.Phase.String(), request.Event.PhaseVersion, taskExecutionID)
+ request.GetEvent().GetPhase().String(), request.GetEvent().GetPhaseVersion(), taskExecutionID)
return nil, errors.NewFlyteAdminErrorf(codes.AlreadyExists,
"have already recorded task execution phase %s (version: %d) for %v",
- request.Event.Phase.String(), request.Event.PhaseVersion, taskExecutionID)
+ request.GetEvent().GetPhase().String(), request.GetEvent().GetPhaseVersion(), taskExecutionID)
}
currentPhase := core.TaskExecution_Phase(core.TaskExecution_Phase_value[taskExecutionModel.Phase])
if common.IsTaskExecutionTerminal(currentPhase) {
// Cannot update a terminal execution.
- curPhase := request.Event.Phase.String()
- errorMsg := fmt.Sprintf("invalid phase change from %v to %v for task execution %v", taskExecutionModel.Phase, request.Event.Phase, taskExecutionID)
+ curPhase := request.GetEvent().GetPhase().String()
+ errorMsg := fmt.Sprintf("invalid phase change from %v to %v for task execution %v", taskExecutionModel.Phase, request.GetEvent().GetPhase(), taskExecutionID)
logger.Warnf(ctx, errorMsg)
return nil, errors.NewAlreadyInTerminalStateError(ctx, errorMsg, curPhase)
}
@@ -191,49 +191,49 @@ func (m *TaskExecutionManager) CreateTaskExecutionEvent(ctx context.Context, req
return nil, err
}
- if request.Event.Phase == core.TaskExecution_RUNNING && request.Event.PhaseVersion == 0 { // TODO: need to be careful about missing inc/decs
+ if request.GetEvent().GetPhase() == core.TaskExecution_RUNNING && request.GetEvent().GetPhaseVersion() == 0 { // TODO: need to be careful about missing inc/decs
m.metrics.ActiveTaskExecutions.Inc()
- } else if common.IsTaskExecutionTerminal(request.Event.Phase) && request.Event.PhaseVersion == 0 {
+ } else if common.IsTaskExecutionTerminal(request.GetEvent().GetPhase()) && request.GetEvent().GetPhaseVersion() == 0 {
m.metrics.ActiveTaskExecutions.Dec()
- m.metrics.TaskExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.Event.Phase.String()))
- if request.Event.GetOutputData() != nil {
- m.metrics.TaskExecutionOutputBytes.Observe(float64(proto.Size(request.Event.GetOutputData())))
+ m.metrics.TaskExecutionsTerminated.Inc(contextutils.WithPhase(ctx, request.GetEvent().GetPhase().String()))
+ if request.GetEvent().GetOutputData() != nil {
+ m.metrics.TaskExecutionOutputBytes.Observe(float64(proto.Size(request.GetEvent().GetOutputData())))
}
}
if err = m.notificationClient.Publish(ctx, proto.MessageName(request), request); err != nil {
m.metrics.PublishEventError.Inc()
- logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.RequestId, err)
+ logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.GetRequestId(), err)
}
go func() {
ceCtx := context.TODO()
if err := m.cloudEventsPublisher.Publish(ceCtx, proto.MessageName(request), request); err != nil {
- logger.Errorf(ctx, "error publishing cloud event [%+v] with err: [%v]", request.RequestId, err)
+ logger.Errorf(ctx, "error publishing cloud event [%+v] with err: [%v]", request.GetRequestId(), err)
}
}()
m.metrics.TaskExecutionEventsCreated.Inc()
- logger.Debugf(ctx, "Successfully recorded task execution event [%v]", request.Event)
+ logger.Debugf(ctx, "Successfully recorded task execution event [%v]", request.GetEvent())
// TODO: we will want to return some scope information here soon!
return &admin.TaskExecutionEventResponse{}, nil
}
func (m *TaskExecutionManager) GetTaskExecution(
ctx context.Context, request *admin.TaskExecutionGetRequest) (*admin.TaskExecution, error) {
- err := validation.ValidateTaskExecutionIdentifier(request.Id)
+ err := validation.ValidateTaskExecutionIdentifier(request.GetId())
if err != nil {
- logger.Debugf(ctx, "Failed to validate GetTaskExecution [%+v] with err: %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to validate GetTaskExecution [%+v] with err: %v", request.GetId(), err)
return nil, err
}
- ctx = getTaskExecutionContext(ctx, request.Id)
- taskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.Id)
+ ctx = getTaskExecutionContext(ctx, request.GetId())
+ taskExecutionModel, err := util.GetTaskExecutionModel(ctx, m.db, request.GetId())
if err != nil {
return nil, err
}
taskExecution, err := transformers.FromTaskExecutionModel(*taskExecutionModel, transformers.DefaultExecutionTransformerOptions)
if err != nil {
- logger.Debugf(ctx, "Failed to transform task execution model [%+v] to proto: %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to transform task execution model [%+v] to proto: %v", request.GetId(), err)
return nil, err
}
return taskExecution, nil
@@ -245,27 +245,27 @@ func (m *TaskExecutionManager) ListTaskExecutions(
logger.Debugf(ctx, "ListTaskExecutions request [%+v] is invalid: %v", request, err)
return nil, err
}
- ctx = getNodeExecutionContext(ctx, request.NodeExecutionId)
+ ctx = getNodeExecutionContext(ctx, request.GetNodeExecutionId())
- identifierFilters, err := util.GetNodeExecutionIdentifierFilters(ctx, request.NodeExecutionId, common.TaskExecution)
+ identifierFilters, err := util.GetNodeExecutionIdentifierFilters(ctx, request.GetNodeExecutionId(), common.TaskExecution)
if err != nil {
return nil, err
}
- filters, err := util.AddRequestFilters(request.Filters, common.TaskExecution, identifierFilters)
+ filters, err := util.AddRequestFilters(request.GetFilters(), common.TaskExecution, identifierFilters)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.TaskExecutionColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.TaskExecutionColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListTaskExecutions", request.Token)
+ "invalid pagination token %s for ListTaskExecutions", request.GetToken())
}
joinTableEntities := make(map[common.Entity]bool)
for _, filter := range filters {
@@ -275,7 +275,7 @@ func (m *TaskExecutionManager) ListTaskExecutions(
output, err := m.db.TaskExecutionRepo().List(ctx, repoInterfaces.ListResourceInput{
InlineFilters: filters,
Offset: offset,
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
SortParameter: sortParameter,
JoinTableEntities: joinTableEntities,
})
@@ -292,7 +292,7 @@ func (m *TaskExecutionManager) ListTaskExecutions(
return nil, err
}
var token string
- if len(taskExecutionList) == int(request.Limit) {
+ if len(taskExecutionList) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(taskExecutionList))
}
return &admin.TaskExecutionList{
@@ -303,16 +303,16 @@ func (m *TaskExecutionManager) ListTaskExecutions(
func (m *TaskExecutionManager) GetTaskExecutionData(
ctx context.Context, request *admin.TaskExecutionGetDataRequest) (*admin.TaskExecutionGetDataResponse, error) {
- if err := validation.ValidateTaskExecutionIdentifier(request.Id); err != nil {
- logger.Debugf(ctx, "Invalid identifier [%+v]: %v", request.Id, err)
+ if err := validation.ValidateTaskExecutionIdentifier(request.GetId()); err != nil {
+ logger.Debugf(ctx, "Invalid identifier [%+v]: %v", request.GetId(), err)
}
- ctx = getTaskExecutionContext(ctx, request.Id)
+ ctx = getTaskExecutionContext(ctx, request.GetId())
taskExecution, err := m.GetTaskExecution(ctx, &admin.TaskExecutionGetRequest{
- Id: request.Id,
+ Id: request.GetId(),
})
if err != nil {
logger.Debugf(ctx, "Failed to get task execution with id [%+v] with err %v",
- request.Id, err)
+ request.GetId(), err)
return nil, err
}
@@ -322,7 +322,7 @@ func (m *TaskExecutionManager) GetTaskExecutionData(
group.Go(func() error {
var err error
inputs, inputURLBlob, err = util.GetInputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(),
- m.storageClient, taskExecution.InputUri)
+ m.storageClient, taskExecution.GetInputUri())
return err
})
@@ -331,7 +331,7 @@ func (m *TaskExecutionManager) GetTaskExecutionData(
group.Go(func() error {
var err error
outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(),
- m.storageClient, taskExecution.Closure)
+ m.storageClient, taskExecution.GetClosure())
return err
})
@@ -345,14 +345,14 @@ func (m *TaskExecutionManager) GetTaskExecutionData(
Outputs: outputURLBlob,
FullInputs: inputs,
FullOutputs: outputs,
- FlyteUrls: common.FlyteURLsFromTaskExecutionID(request.Id, false),
+ FlyteUrls: common.FlyteURLsFromTaskExecutionID(request.GetId(), false),
}
- m.metrics.TaskExecutionInputBytes.Observe(float64(response.Inputs.Bytes))
- if response.Outputs.Bytes > 0 {
- m.metrics.TaskExecutionOutputBytes.Observe(float64(response.Outputs.Bytes))
- } else if response.FullOutputs != nil {
- m.metrics.TaskExecutionOutputBytes.Observe(float64(proto.Size(response.FullOutputs)))
+ m.metrics.TaskExecutionInputBytes.Observe(float64(response.GetInputs().GetBytes()))
+ if response.GetOutputs().GetBytes() > 0 {
+ m.metrics.TaskExecutionOutputBytes.Observe(float64(response.GetOutputs().GetBytes()))
+ } else if response.GetFullOutputs() != nil {
+ m.metrics.TaskExecutionOutputBytes.Observe(float64(proto.Size(response.GetFullOutputs())))
}
return response, nil
}
diff --git a/flyteadmin/pkg/manager/impl/task_execution_manager_test.go b/flyteadmin/pkg/manager/impl/task_execution_manager_test.go
index 7e2a14131e..939086d63d 100644
--- a/flyteadmin/pkg/manager/impl/task_execution_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/task_execution_manager_test.go
@@ -72,9 +72,9 @@ func addGetWorkflowExecutionCallback(repository interfaces.Repository) {
func(ctx context.Context, input interfaces.Identifier) (models.Execution, error) {
return models.Execution{
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
Cluster: "propeller",
}, nil
@@ -88,11 +88,11 @@ func addGetNodeExecutionCallback(repository interfaces.Repository) {
func(ctx context.Context, input interfaces.NodeExecutionResource) (models.NodeExecution, error) {
return models.NodeExecution{
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
}, nil
@@ -105,10 +105,10 @@ func addGetTaskCallback(repository interfaces.Repository) {
func(input interfaces.Identifier) (models.Task, error) {
return models.Task{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
}, nil
},
@@ -126,15 +126,15 @@ func TestCreateTaskEvent(t *testing.T) {
repository.TaskExecutionRepo().(*repositoryMocks.MockTaskExecutionRepo).SetGetCallback(
func(ctx context.Context, input interfaces.GetTaskExecutionInput) (models.TaskExecution, error) {
getTaskCalled = true
- assert.Equal(t, core.ResourceType_TASK, input.TaskExecutionID.TaskId.ResourceType)
- assert.Equal(t, "task-id", input.TaskExecutionID.TaskId.Name)
- assert.Equal(t, "project", input.TaskExecutionID.TaskId.Project)
- assert.Equal(t, "domain", input.TaskExecutionID.TaskId.Domain)
- assert.Equal(t, "task-v", input.TaskExecutionID.TaskId.Version)
- assert.Equal(t, "node-id", input.TaskExecutionID.NodeExecutionId.NodeId)
- assert.Equal(t, "project", input.TaskExecutionID.NodeExecutionId.ExecutionId.Project)
- assert.Equal(t, "domain", input.TaskExecutionID.NodeExecutionId.ExecutionId.Domain)
- assert.Equal(t, "name", input.TaskExecutionID.NodeExecutionId.ExecutionId.Name)
+ assert.Equal(t, core.ResourceType_TASK, input.TaskExecutionID.GetTaskId().GetResourceType())
+ assert.Equal(t, "task-id", input.TaskExecutionID.GetTaskId().GetName())
+ assert.Equal(t, "project", input.TaskExecutionID.GetTaskId().GetProject())
+ assert.Equal(t, "domain", input.TaskExecutionID.GetTaskId().GetDomain())
+ assert.Equal(t, "task-v", input.TaskExecutionID.GetTaskId().GetVersion())
+ assert.Equal(t, "node-id", input.TaskExecutionID.GetNodeExecutionId().GetNodeId())
+ assert.Equal(t, "project", input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject())
+ assert.Equal(t, "domain", input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain())
+ assert.Equal(t, "name", input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetName())
return models.TaskExecution{}, flyteAdminErrors.NewFlyteAdminError(codes.NotFound, "foo")
})
@@ -153,17 +153,17 @@ func TestCreateTaskEvent(t *testing.T) {
assert.Equal(t, models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -219,17 +219,17 @@ func TestCreateTaskEvent_Update(t *testing.T) {
return models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
},
@@ -266,17 +266,17 @@ func TestCreateTaskEvent_Update(t *testing.T) {
assert.EqualValues(t, models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
},
@@ -368,17 +368,17 @@ func TestCreateTaskEvent_UpdateDatabaseError(t *testing.T) {
return models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -407,17 +407,17 @@ func TestCreateTaskEvent_UpdateTerminalEventError(t *testing.T) {
return models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -458,17 +458,17 @@ func TestCreateTaskEvent_PhaseVersionChange(t *testing.T) {
return models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
},
@@ -526,23 +526,23 @@ func TestGetTaskExecution(t *testing.T) {
repository.TaskExecutionRepo().(*repositoryMocks.MockTaskExecutionRepo).SetGetCallback(
func(ctx context.Context, input interfaces.GetTaskExecutionInput) (models.TaskExecution, error) {
getTaskCalled = true
- assert.Equal(t, sampleTaskID, input.TaskExecutionID.TaskId)
- assert.Equal(t, sampleNodeExecID, input.TaskExecutionID.NodeExecutionId)
- assert.Equal(t, uint32(1), input.TaskExecutionID.RetryAttempt)
+ assert.Equal(t, sampleTaskID, input.TaskExecutionID.GetTaskId())
+ assert.Equal(t, sampleNodeExecID, input.TaskExecutionID.GetNodeExecutionId())
+ assert.Equal(t, uint32(1), input.TaskExecutionID.GetRetryAttempt())
return models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -581,17 +581,17 @@ func TestGetTaskExecution_TransformerError(t *testing.T) {
return models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -752,7 +752,7 @@ func TestListTaskExecutions(t *testing.T) {
},
InputUri: "input-uri.pb",
Closure: expectedClosure,
- }, taskExecutions.TaskExecutions[0]))
+ }, taskExecutions.GetTaskExecutions()[0]))
assert.True(t, proto.Equal(&admin.TaskExecution{
Id: &core.TaskExecutionIdentifier{
RetryAttempt: secondRetryAttempt,
@@ -774,7 +774,7 @@ func TestListTaskExecutions(t *testing.T) {
},
InputUri: "input-uri2.pb",
Closure: expectedClosure,
- }, taskExecutions.TaskExecutions[1]))
+ }, taskExecutions.GetTaskExecutions()[1]))
}
func TestListTaskExecutions_Filters(t *testing.T) {
@@ -925,7 +925,7 @@ func TestListTaskExecutions_Filters(t *testing.T) {
},
InputUri: "input-uri.pb",
Closure: expectedClosure,
- }, taskExecutions.TaskExecutions[0]))
+ }, taskExecutions.GetTaskExecutions()[0]))
assert.True(t, proto.Equal(&admin.TaskExecution{
Id: &core.TaskExecutionIdentifier{
RetryAttempt: secondRetryAttempt,
@@ -947,7 +947,7 @@ func TestListTaskExecutions_Filters(t *testing.T) {
},
InputUri: "input-uri2.pb",
Closure: expectedClosure,
- }, taskExecutions.TaskExecutions[1]))
+ }, taskExecutions.GetTaskExecutions()[1]))
}
func TestListTaskExecutions_NoFilters(t *testing.T) {
@@ -1049,17 +1049,17 @@ func TestGetTaskExecutionData(t *testing.T) {
return models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
diff --git a/flyteadmin/pkg/manager/impl/task_manager.go b/flyteadmin/pkg/manager/impl/task_manager.go
index 157bcab5cd..7d903e98fb 100644
--- a/flyteadmin/pkg/manager/impl/task_manager.go
+++ b/flyteadmin/pkg/manager/impl/task_manager.go
@@ -44,44 +44,44 @@ type TaskManager struct {
}
func getTaskContext(ctx context.Context, identifier *core.Identifier) context.Context {
- ctx = contextutils.WithProjectDomain(ctx, identifier.Project, identifier.Domain)
- return contextutils.WithTaskID(ctx, identifier.Name)
+ ctx = contextutils.WithProjectDomain(ctx, identifier.GetProject(), identifier.GetDomain())
+ return contextutils.WithTaskID(ctx, identifier.GetName())
}
func setDefaults(request *admin.TaskCreateRequest) (*admin.TaskCreateRequest, error) {
- if request.Id == nil {
+ if request.GetId() == nil {
return request, errors.NewFlyteAdminError(codes.InvalidArgument,
"missing identifier for TaskCreateRequest")
}
- request.Spec.Template.Id = request.Id
+ request.Spec.Template.Id = request.GetId()
return request, nil
}
func (t *TaskManager) CreateTask(
ctx context.Context,
request *admin.TaskCreateRequest) (*admin.TaskCreateResponse, error) {
- platformTaskResources := util.GetTaskResources(ctx, request.Id, t.resourceManager, t.config.TaskResourceConfiguration())
+ platformTaskResources := util.GetTaskResources(ctx, request.GetId(), t.resourceManager, t.config.TaskResourceConfiguration())
if err := validation.ValidateTask(ctx, request, t.db, platformTaskResources,
t.config.WhitelistConfiguration(), t.config.ApplicationConfiguration()); err != nil {
- logger.Debugf(ctx, "Task [%+v] failed validation with err: %v", request.Id, err)
+ logger.Debugf(ctx, "Task [%+v] failed validation with err: %v", request.GetId(), err)
return nil, err
}
- ctx = getTaskContext(ctx, request.Id)
+ ctx = getTaskContext(ctx, request.GetId())
finalizedRequest, err := setDefaults(request)
if err != nil {
return nil, err
}
// Compile task and store the compiled version in the database.
- compiledTask, err := t.compiler.CompileTask(finalizedRequest.Spec.Template)
+ compiledTask, err := t.compiler.CompileTask(finalizedRequest.GetSpec().GetTemplate())
if err != nil {
- logger.Debugf(ctx, "Failed to compile task with id [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to compile task with id [%+v] with err %v", request.GetId(), err)
return nil, err
}
createdAt, err := ptypes.TimestampProto(time.Now())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal,
- "Failed to serialize CreatedAt: %v when creating task: %+v", err, request.Id)
+ "Failed to serialize CreatedAt: %v when creating task: %+v", err, request.GetId())
}
taskDigest, err := util.GetTaskDigest(ctx, compiledTask)
if err != nil {
@@ -89,7 +89,7 @@ func (t *TaskManager) CreateTask(
return nil, err
}
// See if a task exists and confirm whether it's an identical task or one that with a separate definition.
- existingTaskModel, err := util.GetTaskModel(ctx, t.db, request.Spec.Template.Id)
+ existingTaskModel, err := util.GetTaskModel(ctx, t.db, request.GetSpec().GetTemplate().GetId())
if err == nil {
if bytes.Equal(taskDigest, existingTaskModel.Digest) {
return nil, errors.NewTaskExistsIdenticalStructureError(ctx, request)
@@ -99,7 +99,7 @@ func (t *TaskManager) CreateTask(
logger.Errorf(ctx, "failed to transform task from task model")
return nil, transformerErr
}
- return nil, errors.NewTaskExistsDifferentStructureError(ctx, request, existingTask.Closure.GetCompiledTask(), compiledTask)
+ return nil, errors.NewTaskExistsDifferentStructureError(ctx, request, existingTask.GetClosure().GetCompiledTask(), compiledTask)
}
taskModel, err := transformers.CreateTaskModel(finalizedRequest, &admin.TaskClosure{
CompiledTask: compiledTask,
@@ -111,10 +111,10 @@ func (t *TaskManager) CreateTask(
return nil, err
}
- descriptionModel, err := transformers.CreateDescriptionEntityModel(request.Spec.Description, request.Id)
+ descriptionModel, err := transformers.CreateDescriptionEntityModel(request.GetSpec().GetDescription(), request.GetId())
if err != nil {
logger.Errorf(ctx,
- "Failed to transform description model [%+v] with err: %v", request.Spec.Description, err)
+ "Failed to transform description model [%+v] with err: %v", request.GetSpec().GetDescription(), err)
return nil, err
}
if descriptionModel != nil {
@@ -122,15 +122,15 @@ func (t *TaskManager) CreateTask(
}
err = t.db.TaskRepo().Create(ctx, taskModel, descriptionModel)
if err != nil {
- logger.Debugf(ctx, "Failed to create task model with id [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to create task model with id [%+v] with err %v", request.GetId(), err)
return nil, err
}
t.metrics.ClosureSizeBytes.Observe(float64(len(taskModel.Closure)))
- if finalizedRequest.Spec.Template.Metadata != nil {
+ if finalizedRequest.GetSpec().GetTemplate().GetMetadata() != nil {
contextWithRuntimeMeta := context.WithValue(
- ctx, common.RuntimeTypeKey, finalizedRequest.Spec.Template.Metadata.Runtime.Type.String())
+ ctx, common.RuntimeTypeKey, finalizedRequest.GetSpec().GetTemplate().GetMetadata().GetRuntime().GetType().String())
contextWithRuntimeMeta = context.WithValue(
- contextWithRuntimeMeta, common.RuntimeVersionKey, finalizedRequest.Spec.Template.Metadata.Runtime.Version)
+ contextWithRuntimeMeta, common.RuntimeVersionKey, finalizedRequest.GetSpec().GetTemplate().GetMetadata().GetRuntime().GetVersion())
t.metrics.Registered.Inc(contextWithRuntimeMeta)
}
@@ -138,13 +138,13 @@ func (t *TaskManager) CreateTask(
}
func (t *TaskManager) GetTask(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Task, error) {
- if err := validation.ValidateIdentifier(request.Id, common.Task); err != nil {
- logger.Debugf(ctx, "invalid identifier [%+v]: %v", request.Id, err)
+ if err := validation.ValidateIdentifier(request.GetId(), common.Task); err != nil {
+ logger.Debugf(ctx, "invalid identifier [%+v]: %v", request.GetId(), err)
}
- ctx = getTaskContext(ctx, request.Id)
- task, err := util.GetTask(ctx, t.db, request.Id)
+ ctx = getTaskContext(ctx, request.GetId())
+ task, err := util.GetTask(ctx, t.db, request.GetId())
if err != nil {
- logger.Debugf(ctx, "Failed to get task with id [%+v] with err %v", err, request.Id)
+ logger.Debugf(ctx, "Failed to get task with id [%+v] with err %v", err, request.GetId())
return nil, err
}
return task, nil
@@ -156,13 +156,13 @@ func (t *TaskManager) ListTasks(ctx context.Context, request *admin.ResourceList
logger.Debugf(ctx, "Invalid request [%+v]: %v", request, err)
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
- ctx = contextutils.WithTaskID(ctx, request.Id.Name)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain())
+ ctx = contextutils.WithTaskID(ctx, request.GetId().GetName())
spec := util.FilterSpec{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
- RequestFilters: request.Filters,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
+ RequestFilters: request.GetFilters(),
}
filters, err := util.GetDbFilters(spec, common.Task)
@@ -170,26 +170,26 @@ func (t *TaskManager) ListTasks(ctx context.Context, request *admin.ResourceList
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.TaskColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.TaskColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListTasks", request.Token)
+ "invalid pagination token %s for ListTasks", request.GetToken())
}
// And finally, query the database
listTasksInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
}
output, err := t.db.TaskRepo().List(ctx, listTasksInput)
if err != nil {
- logger.Debugf(ctx, "Failed to list tasks with id [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to list tasks with id [%+v] with err %v", request.GetId(), err)
return nil, err
}
taskList, err := transformers.FromTaskModels(output.Tasks)
@@ -200,7 +200,7 @@ func (t *TaskManager) ListTasks(ctx context.Context, request *admin.ResourceList
}
var token string
- if len(taskList) == int(request.Limit) {
+ if len(taskList) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(taskList))
}
return &admin.TaskList{
@@ -217,27 +217,27 @@ func (t *TaskManager) ListUniqueTaskIdentifiers(ctx context.Context, request *ad
logger.Debugf(ctx, "invalid request [%+v]: %v", request, err)
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain())
filters, err := util.GetDbFilters(util.FilterSpec{
- Project: request.Project,
- Domain: request.Domain,
+ Project: request.GetProject(),
+ Domain: request.GetDomain(),
}, common.Task)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.TaskColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.TaskColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListUniqueTaskIdentifiers", request.Token)
+ "invalid pagination token %s for ListUniqueTaskIdentifiers", request.GetToken())
}
listTasksInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
@@ -246,13 +246,13 @@ func (t *TaskManager) ListUniqueTaskIdentifiers(ctx context.Context, request *ad
output, err := t.db.TaskRepo().ListTaskIdentifiers(ctx, listTasksInput)
if err != nil {
logger.Debugf(ctx, "Failed to list tasks ids with project: %s and domain: %s with err %v",
- request.Project, request.Domain, err)
+ request.GetProject(), request.GetDomain(), err)
return nil, err
}
idList := transformers.FromTaskModelsToIdentifiers(output.Tasks)
var token string
- if len(idList) == int(request.Limit) {
+ if len(idList) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(idList))
}
return &admin.NamedEntityIdentifierList{
diff --git a/flyteadmin/pkg/manager/impl/task_manager_test.go b/flyteadmin/pkg/manager/impl/task_manager_test.go
index 4308fc2626..1301444ceb 100644
--- a/flyteadmin/pkg/manager/impl/task_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/task_manager_test.go
@@ -172,11 +172,11 @@ func TestGetTask(t *testing.T) {
Id: &taskIdentifier,
})
assert.NoError(t, err)
- assert.Equal(t, "project", task.Id.Project)
- assert.Equal(t, "domain", task.Id.Domain)
- assert.Equal(t, "name", task.Id.Name)
- assert.Equal(t, "version", task.Id.Version)
- assert.True(t, proto.Equal(testutils.GetTaskClosure(), task.Closure))
+ assert.Equal(t, "project", task.GetId().GetProject())
+ assert.Equal(t, "domain", task.GetId().GetDomain())
+ assert.Equal(t, "name", task.GetId().GetName())
+ assert.Equal(t, "version", task.GetId().GetVersion())
+ assert.True(t, proto.Equal(testutils.GetTaskClosure(), task.GetClosure()))
}
func TestGetTask_DatabaseError(t *testing.T) {
@@ -287,18 +287,18 @@ func TestListTasks(t *testing.T) {
})
assert.NoError(t, err)
assert.NotNil(t, taskList)
- assert.Len(t, taskList.Tasks, 2)
+ assert.Len(t, taskList.GetTasks(), 2)
- for idx, task := range taskList.Tasks {
- assert.Equal(t, projectValue, task.Id.Project)
- assert.Equal(t, domainValue, task.Id.Domain)
- assert.Equal(t, nameValue, task.Id.Name)
- assert.Equal(t, fmt.Sprintf("version %v", idx), task.Id.Version)
+ for idx, task := range taskList.GetTasks() {
+ assert.Equal(t, projectValue, task.GetId().GetProject())
+ assert.Equal(t, domainValue, task.GetId().GetDomain())
+ assert.Equal(t, nameValue, task.GetId().GetName())
+ assert.Equal(t, fmt.Sprintf("version %v", idx), task.GetId().GetVersion())
assert.True(t, proto.Equal(&admin.TaskClosure{
CreatedAt: testutils.MockCreatedAtProto,
- }, task.Closure))
+ }, task.GetClosure()))
}
- assert.Equal(t, "2", taskList.Token)
+ assert.Equal(t, "2", taskList.GetToken())
}
func TestListTasks_MissingParameters(t *testing.T) {
@@ -401,6 +401,6 @@ func TestListUniqueTaskIdentifiers(t *testing.T) {
})
assert.NoError(t, err)
- assert.Equal(t, 2, len(resp.Entities))
- assert.Empty(t, resp.Token)
+ assert.Equal(t, 2, len(resp.GetEntities()))
+ assert.Empty(t, resp.GetToken())
}
diff --git a/flyteadmin/pkg/manager/impl/testutils/mock_closures.go b/flyteadmin/pkg/manager/impl/testutils/mock_closures.go
index 6554bd6403..945310daf1 100644
--- a/flyteadmin/pkg/manager/impl/testutils/mock_closures.go
+++ b/flyteadmin/pkg/manager/impl/testutils/mock_closures.go
@@ -17,7 +17,7 @@ var MockCreatedAtProto, _ = ptypes.TimestampProto(MockCreatedAtValue)
func GetTaskClosure() *admin.TaskClosure {
return &admin.TaskClosure{
CompiledTask: &core.CompiledTask{
- Template: GetValidTaskRequest().Spec.Template,
+ Template: GetValidTaskRequest().GetSpec().GetTemplate(),
},
CreatedAt: MockCreatedAtProto,
}
@@ -32,11 +32,11 @@ func GetWorkflowClosure() *admin.WorkflowClosure {
return &admin.WorkflowClosure{
CompiledWorkflow: &core.CompiledWorkflowClosure{
Primary: &core.CompiledWorkflow{
- Template: GetWorkflowRequest().Spec.Template,
+ Template: GetWorkflowRequest().GetSpec().GetTemplate(),
},
Tasks: []*core.CompiledTask{
{
- Template: GetValidTaskRequest().Spec.Template,
+ Template: GetValidTaskRequest().GetSpec().GetTemplate(),
},
},
},
diff --git a/flyteadmin/pkg/manager/impl/testutils/mock_requests.go b/flyteadmin/pkg/manager/impl/testutils/mock_requests.go
index b3d01897f1..64ab792220 100644
--- a/flyteadmin/pkg/manager/impl/testutils/mock_requests.go
+++ b/flyteadmin/pkg/manager/impl/testutils/mock_requests.go
@@ -328,10 +328,10 @@ func GetSampleLpSpecForTest() *admin.LaunchPlanSpec {
}
func GetWorkflowRequestInterfaceBytes() []byte {
- bytes, _ := proto.Marshal(GetWorkflowRequest().Spec.Template.Interface)
+ bytes, _ := proto.Marshal(GetWorkflowRequest().GetSpec().GetTemplate().GetInterface())
return bytes
}
func GetWorkflowRequestInterface() *core.TypedInterface {
- return GetWorkflowRequest().Spec.Template.Interface
+ return GetWorkflowRequest().GetSpec().GetTemplate().GetInterface()
}
diff --git a/flyteadmin/pkg/manager/impl/util/data.go b/flyteadmin/pkg/manager/impl/util/data.go
index d6fe14af2e..1827cfd167 100644
--- a/flyteadmin/pkg/manager/impl/util/data.go
+++ b/flyteadmin/pkg/manager/impl/util/data.go
@@ -21,7 +21,7 @@ const (
func shouldFetchData(config *runtimeInterfaces.RemoteDataConfig, urlBlob *admin.UrlBlob) bool {
return config.Scheme == common.Local || config.Scheme == common.None || config.MaxSizeInBytes == 0 ||
- urlBlob.Bytes < config.MaxSizeInBytes
+ urlBlob.GetBytes() < config.MaxSizeInBytes
}
func shouldFetchOutputData(config *runtimeInterfaces.RemoteDataConfig, urlBlob *admin.UrlBlob, outputURI string) bool {
diff --git a/flyteadmin/pkg/manager/impl/util/digests.go b/flyteadmin/pkg/manager/impl/util/digests.go
index 2846490f71..6fd31273c2 100644
--- a/flyteadmin/pkg/manager/impl/util/digests.go
+++ b/flyteadmin/pkg/manager/impl/util/digests.go
@@ -17,9 +17,9 @@ func GetLaunchPlanDigest(ctx context.Context, launchPlan *admin.LaunchPlan) ([]b
launchPlanDigest, err := pbhash.ComputeHash(ctx, launchPlan)
if err != nil {
logger.Warningf(ctx, "failed to hash launch plan [%+v] to digest with err %v",
- launchPlan.Id, err)
+ launchPlan.GetId(), err)
return nil, errors.NewFlyteAdminErrorf(codes.Internal,
- "failed to hash launch plan [%+v] to digest with err %v", launchPlan.Id, err)
+ "failed to hash launch plan [%+v] to digest with err %v", launchPlan.GetId(), err)
}
return launchPlanDigest, nil
@@ -30,9 +30,9 @@ func GetTaskDigest(ctx context.Context, task *core.CompiledTask) ([]byte, error)
taskDigest, err := pbhash.ComputeHash(ctx, task)
if err != nil {
logger.Warningf(ctx, "failed to hash task [%+v] to digest with err %v",
- task.Template.Id, err)
+ task.GetTemplate().GetId(), err)
return nil, errors.NewFlyteAdminErrorf(codes.Internal,
- "failed to hash task [%+v] to digest with err %v", task.Template.Id, err)
+ "failed to hash task [%+v] to digest with err %v", task.GetTemplate().GetId(), err)
}
return taskDigest, nil
@@ -43,9 +43,9 @@ func GetWorkflowDigest(ctx context.Context, workflowClosure *core.CompiledWorkfl
workflowDigest, err := pbhash.ComputeHash(ctx, workflowClosure)
if err != nil {
logger.Warningf(ctx, "failed to hash workflow [%+v] to digest with err %v",
- workflowClosure.Primary.Template.Id, err)
+ workflowClosure.GetPrimary().GetTemplate().GetId(), err)
return nil, errors.NewFlyteAdminErrorf(codes.Internal,
- "failed to hash workflow [%+v] to digest with err %v", workflowClosure.Primary.Template.Id, err)
+ "failed to hash workflow [%+v] to digest with err %v", workflowClosure.GetPrimary().GetTemplate().GetId(), err)
}
return workflowDigest, nil
diff --git a/flyteadmin/pkg/manager/impl/util/digests_test.go b/flyteadmin/pkg/manager/impl/util/digests_test.go
index ee3ea93d19..870fbd4cbd 100644
--- a/flyteadmin/pkg/manager/impl/util/digests_test.go
+++ b/flyteadmin/pkg/manager/impl/util/digests_test.go
@@ -149,7 +149,7 @@ func TestGetWorkflowDigest_Unequal(t *testing.T) {
workflowWithDifferentNodes, err := getCompiledWorkflow()
assert.Nil(t, err)
workflowWithDifferentNodes.Primary.Template.Nodes = append(
- workflowWithDifferentNodes.Primary.Template.Nodes, &core.Node{
+ workflowWithDifferentNodes.GetPrimary().GetTemplate().GetNodes(), &core.Node{
Id: "unexpected",
})
workflowDigest, err := GetWorkflowDigest(context.Background(), workflowWithDifferentNodes)
diff --git a/flyteadmin/pkg/manager/impl/util/filters.go b/flyteadmin/pkg/manager/impl/util/filters.go
index 377dcdab51..b6426a3852 100644
--- a/flyteadmin/pkg/manager/impl/util/filters.go
+++ b/flyteadmin/pkg/manager/impl/util/filters.go
@@ -274,28 +274,28 @@ func GetWorkflowExecutionIdentifierFilters(
ctx context.Context, workflowExecutionIdentifier *core.WorkflowExecutionIdentifier, entity common.Entity) ([]common.InlineFilter, error) {
identifierFilters := make([]common.InlineFilter, 3)
identifierProjectFilter, err := GetSingleValueEqualityFilter(
- entity, shared.Project, workflowExecutionIdentifier.Project)
+ entity, shared.Project, workflowExecutionIdentifier.GetProject())
if err != nil {
logger.Warningf(ctx, "Failed to create execution identifier filter for project: %s with identifier [%+v]",
- workflowExecutionIdentifier.Project, workflowExecutionIdentifier)
+ workflowExecutionIdentifier.GetProject(), workflowExecutionIdentifier)
return nil, err
}
identifierFilters[0] = identifierProjectFilter
identifierDomainFilter, err := GetSingleValueEqualityFilter(
- entity, shared.Domain, workflowExecutionIdentifier.Domain)
+ entity, shared.Domain, workflowExecutionIdentifier.GetDomain())
if err != nil {
logger.Warningf(ctx, "Failed to create execution identifier filter for domain: %s with identifier [%+v]",
- workflowExecutionIdentifier.Domain, workflowExecutionIdentifier)
+ workflowExecutionIdentifier.GetDomain(), workflowExecutionIdentifier)
return nil, err
}
identifierFilters[1] = identifierDomainFilter
identifierNameFilter, err := GetSingleValueEqualityFilter(
- entity, shared.Name, workflowExecutionIdentifier.Name)
+ entity, shared.Name, workflowExecutionIdentifier.GetName())
if err != nil {
logger.Warningf(ctx, "Failed to create execution identifier filter for domain: %s with identifier [%+v]",
- workflowExecutionIdentifier.Name, workflowExecutionIdentifier)
+ workflowExecutionIdentifier.GetName(), workflowExecutionIdentifier)
return nil, err
}
identifierFilters[2] = identifierNameFilter
@@ -306,15 +306,15 @@ func GetWorkflowExecutionIdentifierFilters(
func GetNodeExecutionIdentifierFilters(
ctx context.Context, nodeExecutionIdentifier *core.NodeExecutionIdentifier, entity common.Entity) ([]common.InlineFilter, error) {
workflowExecutionIdentifierFilters, err :=
- GetWorkflowExecutionIdentifierFilters(ctx, nodeExecutionIdentifier.ExecutionId, entity)
+ GetWorkflowExecutionIdentifierFilters(ctx, nodeExecutionIdentifier.GetExecutionId(), entity)
if err != nil {
return nil, err
}
nodeIDFilter, err := GetSingleValueEqualityFilter(
- entity, shared.NodeID, nodeExecutionIdentifier.NodeId)
+ entity, shared.NodeID, nodeExecutionIdentifier.GetNodeId())
if err != nil {
logger.Warningf(ctx, "Failed to create node execution identifier filter for node id: %s with identifier [%+v]",
- nodeExecutionIdentifier.NodeId, nodeExecutionIdentifier)
+ nodeExecutionIdentifier.GetNodeId(), nodeExecutionIdentifier)
}
return append(workflowExecutionIdentifierFilters, nodeIDFilter), nil
}
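The getter rewrites in these hunks lean on a property of protoc-gen-go output: every generated `GetX()` method guards against a nil receiver and returns the field's zero value, so deep chains such as `workflowClosure.GetPrimary().GetTemplate().GetId()` need no intermediate nil checks and cannot panic when an intermediate message is unset. A minimal hand-written sketch of that generated pattern (the types below are stand-ins for illustration, not the flyteidl messages):

```go
package main

import "fmt"

// Stand-ins for generated protobuf messages; the nil-receiver
// guard below mirrors what protoc-gen-go emits for getters.
type Identifier struct{ Project string }
type Template struct{ Id *Identifier }
type Task struct{ Template *Template }

func (x *Identifier) GetProject() string {
	if x != nil {
		return x.Project
	}
	return ""
}

func (x *Template) GetId() *Identifier {
	if x != nil {
		return x.Id
	}
	return nil
}

func (x *Task) GetTemplate() *Template {
	if x != nil {
		return x.Template
	}
	return nil
}

func main() {
	var task *Task // entirely unset
	// Direct access (task.Template.Id.Project) would panic here;
	// the chained getters degrade to the zero value instead.
	fmt.Printf("project=%q\n", task.GetTemplate().GetId().GetProject())
}
```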
diff --git a/flyteadmin/pkg/manager/impl/util/resources.go b/flyteadmin/pkg/manager/impl/util/resources.go
index 79aadb61b2..cd92bb671d 100644
--- a/flyteadmin/pkg/manager/impl/util/resources.go
+++ b/flyteadmin/pkg/manager/impl/util/resources.go
@@ -31,16 +31,16 @@ func getTaskResourcesAsSet(ctx context.Context, identifier *core.Identifier,
result := runtimeInterfaces.TaskResourceSet{}
for _, entry := range resourceEntries {
- switch entry.Name {
+ switch entry.GetName() {
case core.Resources_CPU:
- result.CPU = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.cpu", resourceName), entry.Value)
+ result.CPU = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.cpu", resourceName), entry.GetValue())
case core.Resources_MEMORY:
- result.Memory = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.memory", resourceName), entry.Value)
+ result.Memory = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.memory", resourceName), entry.GetValue())
case core.Resources_EPHEMERAL_STORAGE:
result.EphemeralStorage = parseQuantityNoError(ctx, identifier.String(),
- fmt.Sprintf("%v.ephemeral storage", resourceName), entry.Value)
+ fmt.Sprintf("%v.ephemeral storage", resourceName), entry.GetValue())
case core.Resources_GPU:
- result.GPU = parseQuantityNoError(ctx, identifier.String(), "gpu", entry.Value)
+ result.GPU = parseQuantityNoError(ctx, identifier.String(), "gpu", entry.GetValue())
}
}
@@ -50,28 +50,28 @@ func getTaskResourcesAsSet(ctx context.Context, identifier *core.Identifier,
// GetCompleteTaskResourceRequirements parses the resource requests and limits from the `TaskTemplate` Container.
func GetCompleteTaskResourceRequirements(ctx context.Context, identifier *core.Identifier, task *core.CompiledTask) workflowengineInterfaces.TaskResources {
return workflowengineInterfaces.TaskResources{
- Defaults: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().Resources.Requests, "requests"),
- Limits: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().Resources.Limits, "limits"),
+ Defaults: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().GetResources().GetRequests(), "requests"),
+ Limits: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().GetResources().GetLimits(), "limits"),
}
}
// fromAdminProtoTaskResourceSpec parses the flyteidl `TaskResourceSpec` message into a `TaskResourceSet`.
func fromAdminProtoTaskResourceSpec(ctx context.Context, spec *admin.TaskResourceSpec) runtimeInterfaces.TaskResourceSet {
result := runtimeInterfaces.TaskResourceSet{}
- if len(spec.Cpu) > 0 {
- result.CPU = parseQuantityNoError(ctx, "project", "cpu", spec.Cpu)
+ if len(spec.GetCpu()) > 0 {
+ result.CPU = parseQuantityNoError(ctx, "project", "cpu", spec.GetCpu())
}
- if len(spec.Memory) > 0 {
- result.Memory = parseQuantityNoError(ctx, "project", "memory", spec.Memory)
+ if len(spec.GetMemory()) > 0 {
+ result.Memory = parseQuantityNoError(ctx, "project", "memory", spec.GetMemory())
}
- if len(spec.EphemeralStorage) > 0 {
- result.EphemeralStorage = parseQuantityNoError(ctx, "project", "ephemeral storage", spec.EphemeralStorage)
+ if len(spec.GetEphemeralStorage()) > 0 {
+ result.EphemeralStorage = parseQuantityNoError(ctx, "project", "ephemeral storage", spec.GetEphemeralStorage())
}
- if len(spec.Gpu) > 0 {
- result.GPU = parseQuantityNoError(ctx, "project", "gpu", spec.Gpu)
+ if len(spec.GetGpu()) > 0 {
+ result.GPU = parseQuantityNoError(ctx, "project", "gpu", spec.GetGpu())
}
return result
@@ -86,14 +86,14 @@ func GetTaskResources(ctx context.Context, id *core.Identifier, resourceManager
request := interfaces.ResourceRequest{
ResourceType: admin.MatchableResource_TASK_RESOURCE,
}
- if id != nil && len(id.Project) > 0 {
- request.Project = id.Project
+ if id != nil && len(id.GetProject()) > 0 {
+ request.Project = id.GetProject()
}
- if id != nil && len(id.Domain) > 0 {
- request.Domain = id.Domain
+ if id != nil && len(id.GetDomain()) > 0 {
+ request.Domain = id.GetDomain()
}
- if id != nil && id.ResourceType == core.ResourceType_WORKFLOW && len(id.Name) > 0 {
- request.Workflow = id.Name
+ if id != nil && id.GetResourceType() == core.ResourceType_WORKFLOW && len(id.GetName()) > 0 {
+ request.Workflow = id.GetName()
}
resource, err := resourceManager.GetResource(ctx, request)
@@ -105,8 +105,8 @@ func GetTaskResources(ctx context.Context, id *core.Identifier, resourceManager
logger.Debugf(ctx, "Assigning task requested resources for [%+v]", id)
var taskResourceAttributes = workflowengineInterfaces.TaskResources{}
if resource != nil && resource.Attributes != nil && resource.Attributes.GetTaskResourceAttributes() != nil {
- taskResourceAttributes.Defaults = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().Defaults)
- taskResourceAttributes.Limits = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().Limits)
+ taskResourceAttributes.Defaults = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().GetDefaults())
+ taskResourceAttributes.Limits = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().GetLimits())
} else {
taskResourceAttributes = workflowengineInterfaces.TaskResources{
Defaults: taskResourceConfig.GetDefaults(),
diff --git a/flyteadmin/pkg/manager/impl/util/resources_test.go b/flyteadmin/pkg/manager/impl/util/resources_test.go
index c163b44e0c..932792f307 100644
--- a/flyteadmin/pkg/manager/impl/util/resources_test.go
+++ b/flyteadmin/pkg/manager/impl/util/resources_test.go
@@ -44,9 +44,9 @@ func TestGetTaskResources(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.EqualValues(t, request, managerInterfaces.ResourceRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
- Workflow: workflowIdentifier.Name,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
+ Workflow: workflowIdentifier.GetName(),
ResourceType: admin.MatchableResource_TASK_RESOURCE,
})
return &managerInterfaces.ResourceResponse{}, nil
@@ -73,9 +73,9 @@ func TestGetTaskResources(t *testing.T) {
resourceManager.GetResourceFunc = func(ctx context.Context,
request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) {
assert.EqualValues(t, request, managerInterfaces.ResourceRequest{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
- Workflow: workflowIdentifier.Name,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
+ Workflow: workflowIdentifier.GetName(),
ResourceType: admin.MatchableResource_TASK_RESOURCE,
})
return &managerInterfaces.ResourceResponse{
diff --git a/flyteadmin/pkg/manager/impl/util/shared.go b/flyteadmin/pkg/manager/impl/util/shared.go
index 8402451200..690ad32fa3 100644
--- a/flyteadmin/pkg/manager/impl/util/shared.go
+++ b/flyteadmin/pkg/manager/impl/util/shared.go
@@ -22,8 +22,8 @@ import (
)
func GetExecutionName(request *admin.ExecutionCreateRequest) string {
- if request.Name != "" {
- return request.Name
+ if request.GetName() != "" {
+ return request.GetName()
}
return common.GetExecutionName(time.Now().UnixNano())
}
@@ -46,10 +46,10 @@ func GetTask(ctx context.Context, repo repoInterfaces.Repository, identifier *co
func GetWorkflowModel(
ctx context.Context, repo repoInterfaces.Repository, identifier *core.Identifier) (models.Workflow, error) {
workflowModel, err := (repo).WorkflowRepo().Get(ctx, repoInterfaces.Identifier{
- Project: identifier.Project,
- Domain: identifier.Domain,
- Name: identifier.Name,
- Version: identifier.Version,
+ Project: identifier.GetProject(),
+ Domain: identifier.GetDomain(),
+ Name: identifier.GetName(),
+ Version: identifier.GetVersion(),
})
if err != nil {
return models.Workflow{}, err
@@ -87,7 +87,7 @@ func GetWorkflow(
if err != nil {
return nil, err
}
- closure.CreatedAt = workflow.Closure.CreatedAt
+ closure.CreatedAt = workflow.GetClosure().GetCreatedAt()
workflow.Closure = closure
return &workflow, nil
}
@@ -95,10 +95,10 @@ func GetWorkflow(
func GetLaunchPlanModel(
ctx context.Context, repo repoInterfaces.Repository, identifier *core.Identifier) (models.LaunchPlan, error) {
launchPlanModel, err := (repo).LaunchPlanRepo().Get(ctx, repoInterfaces.Identifier{
- Project: identifier.Project,
- Domain: identifier.Domain,
- Name: identifier.Name,
- Version: identifier.Version,
+ Project: identifier.GetProject(),
+ Domain: identifier.GetDomain(),
+ Name: identifier.GetName(),
+ Version: identifier.GetVersion(),
})
if err != nil {
return models.LaunchPlan{}, err
@@ -119,9 +119,9 @@ func GetNamedEntityModel(
ctx context.Context, repo repoInterfaces.Repository, resourceType core.ResourceType, identifier *admin.NamedEntityIdentifier) (models.NamedEntity, error) {
metadataModel, err := (repo).NamedEntityRepo().Get(ctx, repoInterfaces.GetNamedEntityInput{
ResourceType: resourceType,
- Project: identifier.Project,
- Domain: identifier.Domain,
- Name: identifier.Name,
+ Project: identifier.GetProject(),
+ Domain: identifier.GetDomain(),
+ Name: identifier.GetName(),
})
if err != nil {
return models.NamedEntity{}, err
@@ -142,11 +142,11 @@ func GetNamedEntity(
func GetDescriptionEntityModel(
ctx context.Context, repo repoInterfaces.Repository, identifier *core.Identifier) (models.DescriptionEntity, error) {
descriptionEntityModel, err := (repo).DescriptionEntityRepo().Get(ctx, repoInterfaces.GetDescriptionEntityInput{
- ResourceType: identifier.ResourceType,
- Project: identifier.Project,
- Domain: identifier.Domain,
- Name: identifier.Name,
- Version: identifier.Version,
+ ResourceType: identifier.GetResourceType(),
+ Project: identifier.GetProject(),
+ Domain: identifier.GetDomain(),
+ Name: identifier.GetName(),
+ Version: identifier.GetVersion(),
})
if err != nil {
return models.DescriptionEntity{}, err
@@ -211,9 +211,9 @@ func GetExecutionModel(
ctx context.Context, repo repoInterfaces.Repository, identifier *core.WorkflowExecutionIdentifier) (
*models.Execution, error) {
executionModel, err := repo.ExecutionRepo().Get(ctx, repoInterfaces.Identifier{
- Project: identifier.Project,
- Domain: identifier.Domain,
- Name: identifier.Name,
+ Project: identifier.GetProject(),
+ Domain: identifier.GetDomain(),
+ Name: identifier.GetName(),
})
if err != nil {
return nil, err
@@ -236,10 +236,10 @@ func GetNodeExecutionModel(ctx context.Context, repo repoInterfaces.Repository,
func GetTaskModel(ctx context.Context, repo repoInterfaces.Repository, taskIdentifier *core.Identifier) (
*models.Task, error) {
taskModel, err := repo.TaskRepo().Get(ctx, repoInterfaces.Identifier{
- Project: taskIdentifier.Project,
- Domain: taskIdentifier.Domain,
- Name: taskIdentifier.Name,
- Version: taskIdentifier.Version,
+ Project: taskIdentifier.GetProject(),
+ Domain: taskIdentifier.GetDomain(),
+ Name: taskIdentifier.GetName(),
+ Version: taskIdentifier.GetVersion(),
})
if err != nil {
@@ -305,15 +305,15 @@ func MergeIntoExecConfig(workflowExecConfig *admin.WorkflowExecutionConfig, spec
// Hence we do a deep check in the following conditions before assignment
if (workflowExecConfig.GetRawOutputDataConfig() == nil ||
len(workflowExecConfig.GetRawOutputDataConfig().GetOutputLocationPrefix()) == 0) &&
- (spec.GetRawOutputDataConfig() != nil && len(spec.GetRawOutputDataConfig().OutputLocationPrefix) > 0) {
+ (spec.GetRawOutputDataConfig() != nil && len(spec.GetRawOutputDataConfig().GetOutputLocationPrefix()) > 0) {
workflowExecConfig.RawOutputDataConfig = spec.GetRawOutputDataConfig()
}
- if (workflowExecConfig.GetLabels() == nil || len(workflowExecConfig.GetLabels().Values) == 0) &&
- (spec.GetLabels() != nil && len(spec.GetLabels().Values) > 0) {
+ if (workflowExecConfig.GetLabels() == nil || len(workflowExecConfig.GetLabels().GetValues()) == 0) &&
+ (spec.GetLabels() != nil && len(spec.GetLabels().GetValues()) > 0) {
workflowExecConfig.Labels = spec.GetLabels()
}
- if (workflowExecConfig.GetAnnotations() == nil || len(workflowExecConfig.GetAnnotations().Values) == 0) &&
- (spec.GetAnnotations() != nil && len(spec.GetAnnotations().Values) > 0) {
+ if (workflowExecConfig.GetAnnotations() == nil || len(workflowExecConfig.GetAnnotations().GetValues()) == 0) &&
+ (spec.GetAnnotations() != nil && len(spec.GetAnnotations().GetValues()) > 0) {
workflowExecConfig.Annotations = spec.GetAnnotations()
}
@@ -325,8 +325,8 @@ func MergeIntoExecConfig(workflowExecConfig *admin.WorkflowExecutionConfig, spec
workflowExecConfig.OverwriteCache = spec.GetOverwriteCache()
}
- if (workflowExecConfig.GetEnvs() == nil || len(workflowExecConfig.GetEnvs().Values) == 0) &&
- (spec.GetEnvs() != nil && len(spec.GetEnvs().Values) > 0) {
+ if (workflowExecConfig.GetEnvs() == nil || len(workflowExecConfig.GetEnvs().GetValues()) == 0) &&
+ (spec.GetEnvs() != nil && len(spec.GetEnvs().GetValues()) > 0) {
workflowExecConfig.Envs = spec.GetEnvs()
}
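Checks such as `len(workflowExecConfig.GetLabels().GetValues())` in the hunk above combine the same nil-safe getters with ordinary Go semantics: a getter for a map- or repeated-typed field returns a nil map or slice when the parent message is unset, and `len` and `range` on a nil map or slice are well defined. A small illustration using the same kind of hand-rolled stand-in type as the earlier sketch:

```go
package main

import "fmt"

// Stand-in for a generated message with a map-typed field.
type Labels struct{ Values map[string]string }

func (x *Labels) GetValues() map[string]string {
	if x != nil {
		return x.Values
	}
	return nil
}

func main() {
	var labels *Labels          // unset parent message
	vs := labels.GetValues()    // nil map, no panic
	fmt.Println(len(vs) == 0)   // true: len of a nil map is 0
	for k, v := range vs {      // ranging over a nil map is a no-op
		fmt.Println(k, v)
	}
}
```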
diff --git a/flyteadmin/pkg/manager/impl/util/shared_test.go b/flyteadmin/pkg/manager/impl/util/shared_test.go
index b9b296971e..09cb172638 100644
--- a/flyteadmin/pkg/manager/impl/util/shared_test.go
+++ b/flyteadmin/pkg/manager/impl/util/shared_test.go
@@ -81,10 +81,10 @@ func TestGetTask(t *testing.T) {
})
assert.NoError(t, err)
assert.NotNil(t, task)
- assert.Equal(t, project, task.Id.Project)
- assert.Equal(t, domain, task.Id.Domain)
- assert.Equal(t, name, task.Id.Name)
- assert.Equal(t, version, task.Id.Version)
+ assert.Equal(t, project, task.GetId().GetProject())
+ assert.Equal(t, domain, task.GetId().GetDomain())
+ assert.Equal(t, name, task.GetId().GetName())
+ assert.Equal(t, version, task.GetId().GetVersion())
}
func TestGetTask_DatabaseError(t *testing.T) {
@@ -326,10 +326,10 @@ func TestGetLaunchPlan(t *testing.T) {
})
assert.Nil(t, err)
assert.NotNil(t, launchPlan)
- assert.Equal(t, project, launchPlan.Id.Project)
- assert.Equal(t, domain, launchPlan.Id.Domain)
- assert.Equal(t, name, launchPlan.Id.Name)
- assert.Equal(t, version, launchPlan.Id.Version)
+ assert.Equal(t, project, launchPlan.GetId().GetProject())
+ assert.Equal(t, domain, launchPlan.GetId().GetDomain())
+ assert.Equal(t, name, launchPlan.GetId().GetName())
+ assert.Equal(t, version, launchPlan.GetId().GetVersion())
}
func TestGetLaunchPlan_TransformerError(t *testing.T) {
@@ -443,11 +443,11 @@ func TestGetNamedEntity(t *testing.T) {
})
assert.Nil(t, err)
assert.NotNil(t, entity)
- assert.Equal(t, project, entity.Id.Project)
- assert.Equal(t, domain, entity.Id.Domain)
- assert.Equal(t, name, entity.Id.Name)
- assert.Equal(t, description, entity.Metadata.Description)
- assert.Equal(t, resourceType, entity.ResourceType)
+ assert.Equal(t, project, entity.GetId().GetProject())
+ assert.Equal(t, domain, entity.GetId().GetDomain())
+ assert.Equal(t, name, entity.GetId().GetName())
+ assert.Equal(t, description, entity.GetMetadata().GetDescription())
+ assert.Equal(t, resourceType, entity.GetResourceType())
}
func TestGetActiveLaunchPlanVersionFilters(t *testing.T) {
@@ -505,7 +505,7 @@ func TestGetMatchableResource(t *testing.T) {
}
mr, err := GetMatchableResource(context.Background(), resourceManager, resourceType, project, domain, "")
- assert.Equal(t, int32(12), mr.Attributes.GetWorkflowExecutionConfig().MaxParallelism)
+ assert.Equal(t, int32(12), mr.Attributes.GetWorkflowExecutionConfig().GetMaxParallelism())
assert.Nil(t, err)
})
t.Run("successful fetch workflow matchable", func(t *testing.T) {
@@ -530,7 +530,7 @@ func TestGetMatchableResource(t *testing.T) {
}
mr, err := GetMatchableResource(context.Background(), resourceManager, resourceType, project, domain, workflow)
- assert.Equal(t, int32(12), mr.Attributes.GetWorkflowExecutionConfig().MaxParallelism)
+ assert.Equal(t, int32(12), mr.Attributes.GetWorkflowExecutionConfig().GetMaxParallelism())
assert.Nil(t, err)
})
@@ -614,7 +614,7 @@ func TestGetDescriptionEntity(t *testing.T) {
})
assert.Nil(t, err)
assert.NotNil(t, entity)
- assert.Equal(t, "hello world", entity.ShortDescription)
+ assert.Equal(t, "hello world", entity.GetShortDescription())
})
t.Run("Failed to get DescriptionEntity", func(t *testing.T) {
diff --git a/flyteadmin/pkg/manager/impl/util/single_task_execution.go b/flyteadmin/pkg/manager/impl/util/single_task_execution.go
index 036610a9ec..ff7a8b70ce 100644
--- a/flyteadmin/pkg/manager/impl/util/single_task_execution.go
+++ b/flyteadmin/pkg/manager/impl/util/single_task_execution.go
@@ -48,8 +48,8 @@ func generateWorkflowNameFromTask(taskName string) string {
}
func generateBindings(outputs *core.VariableMap, nodeID string) []*core.Binding {
- bindings := make([]*core.Binding, 0, len(outputs.Variables))
- for key := range outputs.Variables {
+ bindings := make([]*core.Binding, 0, len(outputs.GetVariables()))
+ for key := range outputs.GetVariables() {
binding := &core.Binding{
Var: key,
Binding: &core.BindingData{
@@ -73,16 +73,16 @@ func CreateOrGetWorkflowModel(
task *admin.Task) (*models.Workflow, error) {
workflowIdentifier := core.Identifier{
ResourceType: core.ResourceType_WORKFLOW,
- Project: taskIdentifier.Project,
- Domain: taskIdentifier.Domain,
- Name: generateWorkflowNameFromTask(taskIdentifier.Name),
- Version: taskIdentifier.Version,
+ Project: taskIdentifier.GetProject(),
+ Domain: taskIdentifier.GetDomain(),
+ Name: generateWorkflowNameFromTask(taskIdentifier.GetName()),
+ Version: taskIdentifier.GetVersion(),
}
workflowModel, err := db.WorkflowRepo().Get(ctx, repositoryInterfaces.Identifier{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
- Name: workflowIdentifier.Name,
- Version: workflowIdentifier.Version,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
+ Name: workflowIdentifier.GetName(),
+ Version: workflowIdentifier.GetVersion(),
})
var retryStrategy *core.RetryStrategy
@@ -100,15 +100,15 @@ func CreateOrGetWorkflowModel(
workflowSpec := admin.WorkflowSpec{
Template: &core.WorkflowTemplate{
Id: &workflowIdentifier,
- Interface: task.Closure.CompiledTask.Template.Interface,
+ Interface: task.GetClosure().GetCompiledTask().GetTemplate().GetInterface(),
Nodes: []*core.Node{
{
- Id: generateNodeNameFromTask(taskIdentifier.Name),
+ Id: generateNodeNameFromTask(taskIdentifier.GetName()),
Metadata: &core.NodeMetadata{
- Name: generateNodeNameFromTask(taskIdentifier.Name),
+ Name: generateNodeNameFromTask(taskIdentifier.GetName()),
Retries: retryStrategy,
},
- Inputs: generateBindings(task.Closure.CompiledTask.Template.Interface.Inputs, noInputNodeID),
+ Inputs: generateBindings(task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs(), noInputNodeID),
Target: &core.Node_TaskNode{
TaskNode: &core.TaskNode{
Reference: &core.TaskNode_ReferenceId{
@@ -119,7 +119,7 @@ func CreateOrGetWorkflowModel(
},
},
- Outputs: generateBindings(task.Closure.CompiledTask.Template.Interface.Outputs, generateNodeNameFromTask(taskIdentifier.Name)),
+ Outputs: generateBindings(task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetOutputs(), generateNodeNameFromTask(taskIdentifier.GetName())),
},
}
@@ -138,9 +138,9 @@ func CreateOrGetWorkflowModel(
_, err = namedEntityManager.UpdateNamedEntity(ctx, &admin.NamedEntityUpdateRequest{
ResourceType: core.ResourceType_WORKFLOW,
Id: &admin.NamedEntityIdentifier{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
- Name: workflowIdentifier.Name,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
+ Name: workflowIdentifier.GetName(),
},
Metadata: &admin.NamedEntityMetadata{State: admin.NamedEntityState_SYSTEM_GENERATED},
})
@@ -149,10 +149,10 @@ func CreateOrGetWorkflowModel(
return nil, err
}
workflowModel, err = db.WorkflowRepo().Get(ctx, repositoryInterfaces.Identifier{
- Project: workflowIdentifier.Project,
- Domain: workflowIdentifier.Domain,
- Name: workflowIdentifier.Name,
- Version: workflowIdentifier.Version,
+ Project: workflowIdentifier.GetProject(),
+ Domain: workflowIdentifier.GetDomain(),
+ Name: workflowIdentifier.GetName(),
+ Version: workflowIdentifier.GetVersion(),
})
if err != nil {
// This is unexpected - at this point we've successfully just created the skeleton workflow.
@@ -171,10 +171,10 @@ func CreateOrGetLaunchPlan(ctx context.Context,
var err error
launchPlanIdentifier := &core.Identifier{
ResourceType: core.ResourceType_LAUNCH_PLAN,
- Project: taskIdentifier.Project,
- Domain: taskIdentifier.Domain,
- Name: generateWorkflowNameFromTask(taskIdentifier.Name),
- Version: taskIdentifier.Version,
+ Project: taskIdentifier.GetProject(),
+ Domain: taskIdentifier.GetDomain(),
+ Name: generateWorkflowNameFromTask(taskIdentifier.GetName()),
+ Version: taskIdentifier.GetVersion(),
}
launchPlan, err = GetLaunchPlan(ctx, db, launchPlanIdentifier)
if err != nil {
@@ -188,29 +188,29 @@ func CreateOrGetLaunchPlan(ctx context.Context,
Spec: &admin.LaunchPlanSpec{
WorkflowId: &core.Identifier{
ResourceType: core.ResourceType_WORKFLOW,
- Project: taskIdentifier.Project,
- Domain: taskIdentifier.Domain,
- Name: taskIdentifier.Name,
- Version: taskIdentifier.Version,
+ Project: taskIdentifier.GetProject(),
+ Domain: taskIdentifier.GetDomain(),
+ Name: taskIdentifier.GetName(),
+ Version: taskIdentifier.GetVersion(),
},
EntityMetadata: &admin.LaunchPlanMetadata{},
DefaultInputs: &core.ParameterMap{},
FixedInputs: &core.LiteralMap{},
Labels: &admin.Labels{},
Annotations: &admin.Annotations{},
- AuthRole: spec.AuthRole,
- SecurityContext: spec.SecurityContext,
+ AuthRole: spec.GetAuthRole(),
+ SecurityContext: spec.GetSecurityContext(),
},
}
if err := validation.ValidateLaunchPlan(ctx, generatedCreateLaunchPlanReq, db, config.ApplicationConfiguration(), workflowInterface); err != nil {
logger.Debugf(ctx, "could not create launch plan: %+v, request failed validation with err: %v", taskIdentifier, err)
return nil, err
}
- transformedLaunchPlan := transformers.CreateLaunchPlan(generatedCreateLaunchPlanReq, workflowInterface.Outputs)
+ transformedLaunchPlan := transformers.CreateLaunchPlan(generatedCreateLaunchPlanReq, workflowInterface.GetOutputs())
launchPlan = transformedLaunchPlan
launchPlanDigest, err := GetLaunchPlanDigest(ctx, launchPlan)
if err != nil {
- logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.Id, err)
+ logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.GetId(), err)
return nil, err
}
launchPlanModel, err :=
@@ -218,7 +218,7 @@ func CreateOrGetLaunchPlan(ctx context.Context,
if err != nil {
logger.Errorf(ctx,
"Failed to transform launch plan model [%+v], and workflow outputs [%+v] with err: %v",
- taskIdentifier, workflowInterface.Outputs, err)
+ taskIdentifier, workflowInterface.GetOutputs(), err)
return nil, err
}
err = db.LaunchPlanRepo().Create(ctx, launchPlanModel)
diff --git a/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go b/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go
index 13ed4a945d..d0aff9edef 100644
--- a/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go
+++ b/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go
@@ -88,13 +88,13 @@ func TestCreateOrGetWorkflowModel(t *testing.T) {
mockNamedEntityManager := managerMocks.NamedEntityManager{}
mockNamedEntityManager.UpdateNamedEntityFunc = func(ctx context.Context, request *admin.NamedEntityUpdateRequest) (*admin.NamedEntityUpdateResponse, error) {
- assert.Equal(t, request.ResourceType, core.ResourceType_WORKFLOW)
- assert.True(t, proto.Equal(request.Id, &admin.NamedEntityIdentifier{
+ assert.Equal(t, request.GetResourceType(), core.ResourceType_WORKFLOW)
+ assert.True(t, proto.Equal(request.GetId(), &admin.NamedEntityIdentifier{
Project: "flytekit",
Domain: "production",
Name: ".flytegen.app.workflows.MyWorkflow.my_task",
- }), fmt.Sprintf("%+v", request.Id))
- assert.True(t, proto.Equal(request.Metadata, &admin.NamedEntityMetadata{
+ }), fmt.Sprintf("%+v", request.GetId()))
+ assert.True(t, proto.Equal(request.GetMetadata(), &admin.NamedEntityMetadata{
State: admin.NamedEntityState_SYSTEM_GENERATED,
}))
return &admin.NamedEntityUpdateResponse{}, nil
@@ -102,13 +102,13 @@ func TestCreateOrGetWorkflowModel(t *testing.T) {
mockWorkflowManager := managerMocks.MockWorkflowManager{}
mockWorkflowManager.SetCreateCallback(func(ctx context.Context, request *admin.WorkflowCreateRequest) (*admin.WorkflowCreateResponse, error) {
- assert.True(t, proto.Equal(request.Id, &core.Identifier{
+ assert.True(t, proto.Equal(request.GetId(), &core.Identifier{
ResourceType: core.ResourceType_WORKFLOW,
Project: "flytekit",
Domain: "production",
Name: ".flytegen.app.workflows.MyWorkflow.my_task",
Version: "12345",
- }), fmt.Sprintf("%+v", request.Id))
+ }), fmt.Sprintf("%+v", request.GetId()))
assert.Len(t, request.GetSpec().GetTemplate().GetNodes(), 1)
assert.Equal(t, request.GetSpec().GetTemplate().GetNodes()[0].GetMetadata().GetRetries().GetRetries(), uint32(2))
@@ -220,13 +220,13 @@ func TestCreateOrGetLaunchPlan(t *testing.T) {
mockNamedEntityManager := managerMocks.NamedEntityManager{}
mockNamedEntityManager.UpdateNamedEntityFunc = func(ctx context.Context, request *admin.NamedEntityUpdateRequest) (*admin.NamedEntityUpdateResponse, error) {
- assert.Equal(t, request.ResourceType, core.ResourceType_LAUNCH_PLAN)
- assert.True(t, proto.Equal(request.Id, &admin.NamedEntityIdentifier{
+ assert.Equal(t, request.GetResourceType(), core.ResourceType_LAUNCH_PLAN)
+ assert.True(t, proto.Equal(request.GetId(), &admin.NamedEntityIdentifier{
Project: "flytekit",
Domain: "production",
Name: ".flytegen.app.workflows.MyWorkflow.my_task",
- }), fmt.Sprintf("%+v", request.Id))
- assert.True(t, proto.Equal(request.Metadata, &admin.NamedEntityMetadata{
+ }), fmt.Sprintf("%+v", request.GetId()))
+ assert.True(t, proto.Equal(request.GetMetadata(), &admin.NamedEntityMetadata{
State: admin.NamedEntityState_SYSTEM_GENERATED,
}))
return &admin.NamedEntityUpdateResponse{}, nil
@@ -256,7 +256,7 @@ func TestCreateOrGetLaunchPlan(t *testing.T) {
Domain: "production",
Name: ".flytegen.app.workflows.MyWorkflow.my_task",
Version: "12345",
- }, launchPlan.Id))
- assert.True(t, proto.Equal(launchPlan.Closure.ExpectedOutputs, workflowInterface.Outputs))
- assert.True(t, proto.Equal(launchPlan.Spec.AuthRole, spec.AuthRole))
+ }, launchPlan.GetId()))
+ assert.True(t, proto.Equal(launchPlan.GetClosure().GetExpectedOutputs(), workflowInterface.GetOutputs()))
+ assert.True(t, proto.Equal(launchPlan.GetSpec().GetAuthRole(), spec.GetAuthRole()))
}
diff --git a/flyteadmin/pkg/manager/impl/validation/attributes_validator.go b/flyteadmin/pkg/manager/impl/validation/attributes_validator.go
index bfaccd80a1..99929513b5 100644
--- a/flyteadmin/pkg/manager/impl/validation/attributes_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/attributes_validator.go
@@ -42,15 +42,15 @@ func ValidateProjectDomainAttributesUpdateRequest(ctx context.Context,
db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration,
request *admin.ProjectDomainAttributesUpdateRequest) (
admin.MatchableResource, error) {
- if request.Attributes == nil {
+ if request.GetAttributes() == nil {
return defaultMatchableResource, shared.GetMissingArgumentError(shared.Attributes)
}
- if err := ValidateProjectAndDomain(ctx, db, config, request.Attributes.Project, request.Attributes.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetAttributes().GetProject(), request.GetAttributes().GetDomain()); err != nil {
return defaultMatchableResource, err
}
- return validateMatchingAttributes(request.Attributes.MatchingAttributes,
- fmt.Sprintf("%s-%s", request.Attributes.Project, request.Attributes.Domain))
+ return validateMatchingAttributes(request.GetAttributes().GetMatchingAttributes(),
+ fmt.Sprintf("%s-%s", request.GetAttributes().GetProject(), request.GetAttributes().GetDomain()))
}
func ValidateProjectAttributesUpdateRequest(ctx context.Context,
@@ -58,19 +58,19 @@ func ValidateProjectAttributesUpdateRequest(ctx context.Context,
request *admin.ProjectAttributesUpdateRequest) (
admin.MatchableResource, error) {
- if request.Attributes == nil {
+ if request.GetAttributes() == nil {
return defaultMatchableResource, shared.GetMissingArgumentError(shared.Attributes)
}
- if err := ValidateProjectForUpdate(ctx, db, request.Attributes.Project); err != nil {
+ if err := ValidateProjectForUpdate(ctx, db, request.GetAttributes().GetProject()); err != nil {
return defaultMatchableResource, err
}
- return validateMatchingAttributes(request.Attributes.MatchingAttributes, request.Attributes.Project)
+ return validateMatchingAttributes(request.GetAttributes().GetMatchingAttributes(), request.GetAttributes().GetProject())
}
func ValidateProjectDomainAttributesGetRequest(ctx context.Context, db repositoryInterfaces.Repository,
config runtimeInterfaces.ApplicationConfiguration, request *admin.ProjectDomainAttributesGetRequest) error {
- if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil {
return err
}
@@ -79,7 +79,7 @@ func ValidateProjectDomainAttributesGetRequest(ctx context.Context, db repositor
func ValidateProjectDomainAttributesDeleteRequest(ctx context.Context, db repositoryInterfaces.Repository,
config runtimeInterfaces.ApplicationConfiguration, request *admin.ProjectDomainAttributesDeleteRequest) error {
- if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil {
return err
}
@@ -89,26 +89,26 @@ func ValidateProjectDomainAttributesDeleteRequest(ctx context.Context, db reposi
func ValidateWorkflowAttributesUpdateRequest(ctx context.Context, db repositoryInterfaces.Repository,
config runtimeInterfaces.ApplicationConfiguration, request *admin.WorkflowAttributesUpdateRequest) (
admin.MatchableResource, error) {
- if request.Attributes == nil {
+ if request.GetAttributes() == nil {
return defaultMatchableResource, shared.GetMissingArgumentError(shared.Attributes)
}
- if err := ValidateProjectAndDomain(ctx, db, config, request.Attributes.Project, request.Attributes.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetAttributes().GetProject(), request.GetAttributes().GetDomain()); err != nil {
return defaultMatchableResource, err
}
- if err := ValidateEmptyStringField(request.Attributes.Workflow, shared.Name); err != nil {
+ if err := ValidateEmptyStringField(request.GetAttributes().GetWorkflow(), shared.Name); err != nil {
return defaultMatchableResource, err
}
- return validateMatchingAttributes(request.Attributes.MatchingAttributes,
- fmt.Sprintf("%s-%s-%s", request.Attributes.Project, request.Attributes.Domain, request.Attributes.Workflow))
+ return validateMatchingAttributes(request.GetAttributes().GetMatchingAttributes(),
+ fmt.Sprintf("%s-%s-%s", request.GetAttributes().GetProject(), request.GetAttributes().GetDomain(), request.GetAttributes().GetWorkflow()))
}
func ValidateWorkflowAttributesGetRequest(ctx context.Context, db repositoryInterfaces.Repository,
config runtimeInterfaces.ApplicationConfiguration, request *admin.WorkflowAttributesGetRequest) error {
- if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Workflow, shared.Name); err != nil {
+ if err := ValidateEmptyStringField(request.GetWorkflow(), shared.Name); err != nil {
return err
}
@@ -117,10 +117,10 @@ func ValidateWorkflowAttributesGetRequest(ctx context.Context, db repositoryInte
func ValidateWorkflowAttributesDeleteRequest(ctx context.Context, db repositoryInterfaces.Repository,
config runtimeInterfaces.ApplicationConfiguration, request *admin.WorkflowAttributesDeleteRequest) error {
- if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Workflow, shared.Name); err != nil {
+ if err := ValidateEmptyStringField(request.GetWorkflow(), shared.Name); err != nil {
return err
}
@@ -128,7 +128,7 @@ func ValidateWorkflowAttributesDeleteRequest(ctx context.Context, db repositoryI
}
func ValidateListAllMatchableAttributesRequest(request *admin.ListMatchableAttributesRequest) error {
- if _, ok := admin.MatchableResource_name[int32(request.ResourceType)]; !ok {
+ if _, ok := admin.MatchableResource_name[int32(request.GetResourceType())]; !ok {
return shared.GetInvalidArgumentError(shared.ResourceType)
}
return nil
diff --git a/flyteadmin/pkg/manager/impl/validation/execution_validator.go b/flyteadmin/pkg/manager/impl/validation/execution_validator.go
index f5fd30598a..c9f357b525 100644
--- a/flyteadmin/pkg/manager/impl/validation/execution_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/execution_validator.go
@@ -28,47 +28,47 @@ var acceptedReferenceLaunchTypes = map[core.ResourceType]interface{}{
func ValidateExecutionRequest(ctx context.Context, request *admin.ExecutionCreateRequest,
db repositoryInterfaces.Repository, config runtimeInterfaces.ApplicationConfiguration) error {
- if err := ValidateEmptyStringField(request.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(request.GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(request.GetDomain(), shared.Domain); err != nil {
return err
}
- if request.Name != "" {
- if err := CheckValidExecutionID(strings.ToLower(request.Name), shared.Name); err != nil {
+ if request.GetName() != "" {
+ if err := CheckValidExecutionID(strings.ToLower(request.GetName()), shared.Name); err != nil {
return err
}
}
- if len(request.Name) > allowedExecutionNameLength {
+ if len(request.GetName()) > allowedExecutionNameLength {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"name for ExecutionCreateRequest [%+v] exceeded allowed length %d", request, allowedExecutionNameLength)
}
- if err := ValidateProjectAndDomain(ctx, db, config, request.Project, request.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetProject(), request.GetDomain()); err != nil {
return err
}
- if request.Spec == nil {
+ if request.GetSpec() == nil {
return shared.GetMissingArgumentError(shared.Spec)
}
// TODO(katrogan): Change the name of Spec.LaunchPlan to something more generic to permit reference Tasks.
// https://github.com/flyteorg/flyte/issues/262
- if err := ValidateIdentifierFieldsSet(request.Spec.LaunchPlan); err != nil {
+ if err := ValidateIdentifierFieldsSet(request.GetSpec().GetLaunchPlan()); err != nil {
return err
}
- if _, ok := acceptedReferenceLaunchTypes[request.Spec.LaunchPlan.ResourceType]; !ok {
+ if _, ok := acceptedReferenceLaunchTypes[request.GetSpec().GetLaunchPlan().GetResourceType()]; !ok {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"Invalid reference entity resource type [%v], only [%+v] allowed",
- request.Spec.LaunchPlan.ResourceType, acceptedReferenceLaunchTypes)
+ request.GetSpec().GetLaunchPlan().GetResourceType(), acceptedReferenceLaunchTypes)
}
- if err := validateLiteralMap(request.Inputs, shared.Inputs); err != nil {
+ if err := validateLiteralMap(request.GetInputs(), shared.Inputs); err != nil {
return err
}
- if request.Spec.GetNotifications() != nil {
- if err := validateNotifications(request.Spec.GetNotifications().Notifications); err != nil {
+ if request.GetSpec().GetNotifications() != nil {
+ if err := validateNotifications(request.GetSpec().GetNotifications().GetNotifications()); err != nil {
return err
}
}
- if err := validateLabels(request.Spec.Labels); err != nil {
+ if err := validateLabels(request.GetSpec().GetLabels()); err != nil {
return err
}
return nil
@@ -140,14 +140,14 @@ func CheckValidExecutionID(executionID, fieldName string) error {
}
func ValidateCreateWorkflowEventRequest(request *admin.WorkflowExecutionEventRequest, maxOutputSizeInBytes int64) error {
- if request.Event == nil {
+ if request.GetEvent() == nil {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"Workflow event handler was called without event")
- } else if request.Event.ExecutionId == nil {
+ } else if request.GetEvent().GetExecutionId() == nil {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "Workflow event handler request event doesn't have an execution id - %v", request.Event)
+ "Workflow event handler request event doesn't have an execution id - %v", request.GetEvent())
}
- if err := ValidateOutputData(request.Event.GetOutputData(), maxOutputSizeInBytes); err != nil {
+ if err := ValidateOutputData(request.GetEvent().GetOutputData(), maxOutputSizeInBytes); err != nil {
return err
}
return nil
@@ -157,13 +157,13 @@ func ValidateWorkflowExecutionIdentifier(identifier *core.WorkflowExecutionIdent
if identifier == nil {
return shared.GetMissingArgumentError(shared.ID)
}
- if err := ValidateEmptyStringField(identifier.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(identifier.GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(identifier.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(identifier.GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateEmptyStringField(identifier.Name, shared.Name); err != nil {
+ if err := ValidateEmptyStringField(identifier.GetName(), shared.Name); err != nil {
return err
}
return nil
diff --git a/flyteadmin/pkg/manager/impl/validation/execution_validator_test.go b/flyteadmin/pkg/manager/impl/validation/execution_validator_test.go
index 943e5006e7..89e97370fa 100644
--- a/flyteadmin/pkg/manager/impl/validation/execution_validator_test.go
+++ b/flyteadmin/pkg/manager/impl/validation/execution_validator_test.go
@@ -92,9 +92,9 @@ func TestGetExecutionInputs(t *testing.T) {
lpRequest := testutils.GetLaunchPlanRequest()
actualInputs, err := CheckAndFetchInputsForExecution(
- executionRequest.Inputs,
- lpRequest.Spec.FixedInputs,
- lpRequest.Spec.DefaultInputs,
+ executionRequest.GetInputs(),
+ lpRequest.GetSpec().GetFixedInputs(),
+ lpRequest.GetSpec().GetDefaultInputs(),
)
expectedMap := &core.LiteralMap{
Literals: map[string]*core.Literal{
@@ -125,9 +125,9 @@ func TestGetExecutionWithOffloadedInputs(t *testing.T) {
lpRequest := testutils.GetLaunchPlanRequest()
actualInputs, err := CheckAndFetchInputsForExecution(
- executionRequest.Inputs,
- lpRequest.Spec.FixedInputs,
- lpRequest.Spec.DefaultInputs,
+ executionRequest.GetInputs(),
+ lpRequest.GetSpec().GetFixedInputs(),
+ lpRequest.GetSpec().GetDefaultInputs(),
)
expectedMap := core.LiteralMap{
Literals: map[string]*core.Literal{
@@ -137,8 +137,8 @@ func TestGetExecutionWithOffloadedInputs(t *testing.T) {
}
assert.Nil(t, err)
assert.NotNil(t, actualInputs)
- assert.EqualValues(t, expectedMap.GetLiterals()["foo"], actualInputs.Literals["foo"])
- assert.EqualValues(t, expectedMap.GetLiterals()["bar"], actualInputs.Literals["bar"])
+ assert.EqualValues(t, expectedMap.GetLiterals()["foo"], actualInputs.GetLiterals()["foo"])
+ assert.EqualValues(t, expectedMap.GetLiterals()["bar"], actualInputs.GetLiterals()["bar"])
}
func TestValidateExecInputsWrongType(t *testing.T) {
@@ -150,9 +150,9 @@ func TestValidateExecInputsWrongType(t *testing.T) {
},
}
_, err := CheckAndFetchInputsForExecution(
- executionRequest.Inputs,
- lpRequest.Spec.FixedInputs,
- lpRequest.Spec.DefaultInputs,
+ executionRequest.GetInputs(),
+ lpRequest.GetSpec().GetFixedInputs(),
+ lpRequest.GetSpec().GetDefaultInputs(),
)
utils.AssertEqualWithSanitizedRegex(t, "invalid foo input wrong type. Expected simple:STRING, but got simple:INTEGER", err.Error())
}
@@ -167,9 +167,9 @@ func TestValidateExecInputsExtraInputs(t *testing.T) {
},
}
_, err := CheckAndFetchInputsForExecution(
- executionRequest.Inputs,
- lpRequest.Spec.FixedInputs,
- lpRequest.Spec.DefaultInputs,
+ executionRequest.GetInputs(),
+ lpRequest.GetSpec().GetFixedInputs(),
+ lpRequest.GetSpec().GetDefaultInputs(),
)
assert.EqualError(t, err, "invalid input foo-extra")
}
@@ -184,9 +184,9 @@ func TestValidateExecInputsOverrideFixed(t *testing.T) {
},
}
_, err := CheckAndFetchInputsForExecution(
- executionRequest.Inputs,
- lpRequest.Spec.FixedInputs,
- lpRequest.Spec.DefaultInputs,
+ executionRequest.GetInputs(),
+ lpRequest.GetSpec().GetFixedInputs(),
+ lpRequest.GetSpec().GetDefaultInputs(),
)
assert.EqualError(t, err, "invalid input bar")
}
@@ -196,9 +196,9 @@ func TestValidateExecEmptyInputs(t *testing.T) {
lpRequest := testutils.GetLaunchPlanRequest()
executionRequest.Inputs = nil
actualInputs, err := CheckAndFetchInputsForExecution(
- executionRequest.Inputs,
- lpRequest.Spec.FixedInputs,
- lpRequest.Spec.DefaultInputs,
+ executionRequest.GetInputs(),
+ lpRequest.GetSpec().GetFixedInputs(),
+ lpRequest.GetSpec().GetDefaultInputs(),
)
expectedMap := &core.LiteralMap{
Literals: map[string]*core.Literal{
diff --git a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator.go b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator.go
index eb49b10089..0308faceba 100644
--- a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator.go
@@ -19,36 +19,36 @@ import (
func ValidateLaunchPlan(ctx context.Context,
request *admin.LaunchPlanCreateRequest, db repositoryInterfaces.Repository,
config runtimeInterfaces.ApplicationConfiguration, workflowInterface *core.TypedInterface) error {
- if err := ValidateIdentifier(request.Id, common.LaunchPlan); err != nil {
+ if err := ValidateIdentifier(request.GetId(), common.LaunchPlan); err != nil {
return err
}
- if err := ValidateProjectAndDomain(ctx, db, config, request.Id.Project, request.Id.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetId().GetProject(), request.GetId().GetDomain()); err != nil {
return err
}
- if request.Spec == nil {
+ if request.GetSpec() == nil {
return shared.GetMissingArgumentError(shared.Spec)
}
- if err := ValidateIdentifier(request.Spec.WorkflowId, common.Workflow); err != nil {
+ if err := ValidateIdentifier(request.GetSpec().GetWorkflowId(), common.Workflow); err != nil {
return err
}
- if err := validateLabels(request.Spec.Labels); err != nil {
+ if err := validateLabels(request.GetSpec().GetLabels()); err != nil {
return err
}
- if err := validateLiteralMap(request.Spec.FixedInputs, shared.FixedInputs); err != nil {
+ if err := validateLiteralMap(request.GetSpec().GetFixedInputs(), shared.FixedInputs); err != nil {
return err
}
if config.GetTopLevelConfig().FeatureGates.EnableArtifacts {
- if err := validateParameterMapAllowArtifacts(request.Spec.DefaultInputs, shared.DefaultInputs); err != nil {
+ if err := validateParameterMapAllowArtifacts(request.GetSpec().GetDefaultInputs(), shared.DefaultInputs); err != nil {
return err
}
} else {
- if err := validateParameterMapDisableArtifacts(request.Spec.DefaultInputs, shared.DefaultInputs); err != nil {
+ if err := validateParameterMapDisableArtifacts(request.GetSpec().GetDefaultInputs(), shared.DefaultInputs); err != nil {
return err
}
}
- expectedInputs, err := checkAndFetchExpectedInputForLaunchPlan(workflowInterface.GetInputs(), request.Spec.FixedInputs, request.Spec.DefaultInputs)
+ expectedInputs, err := checkAndFetchExpectedInputForLaunchPlan(workflowInterface.GetInputs(), request.GetSpec().GetFixedInputs(), request.GetSpec().GetDefaultInputs())
if err != nil {
return err
}
@@ -58,8 +58,8 @@ func ValidateLaunchPlan(ctx context.Context,
// Augment default inputs with the unbound workflow inputs.
request.Spec.DefaultInputs = expectedInputs
- if request.Spec.EntityMetadata != nil {
- if err := validateNotifications(request.Spec.EntityMetadata.Notifications); err != nil {
+ if request.GetSpec().GetEntityMetadata() != nil {
+ if err := validateNotifications(request.GetSpec().GetEntityMetadata().GetNotifications()); err != nil {
return err
}
if request.GetSpec().GetEntityMetadata().GetLaunchConditions() != nil {
@@ -74,7 +74,7 @@ func ValidateLaunchPlan(ctx context.Context,
func validateSchedule(request *admin.LaunchPlanCreateRequest, expectedInputs *core.ParameterMap) error {
schedule := request.GetSpec().GetEntityMetadata().GetSchedule()
if schedule.GetCronExpression() != "" || schedule.GetRate() != nil || schedule.GetCronSchedule() != nil {
- for key, value := range expectedInputs.Parameters {
+ for key, value := range expectedInputs.GetParameters() {
if value.GetRequired() && key != schedule.GetKickoffTimeInputArg() {
return errors.NewFlyteAdminErrorf(
codes.InvalidArgument,
@@ -82,7 +82,7 @@ func validateSchedule(request *admin.LaunchPlanCreateRequest, expectedInputs *co
}
}
if schedule.GetKickoffTimeInputArg() != "" {
- if param, ok := expectedInputs.Parameters[schedule.GetKickoffTimeInputArg()]; !ok {
+ if param, ok := expectedInputs.GetParameters()[schedule.GetKickoffTimeInputArg()]; !ok {
return errors.NewFlyteAdminErrorf(
codes.InvalidArgument,
"Cannot create a schedule with a KickoffTimeInputArg that does not point to a free input. [%v] is not free or does not exist.", schedule.GetKickoffTimeInputArg())
@@ -125,7 +125,7 @@ func checkAndFetchExpectedInputForLaunchPlan(
}
// If there are no inputs that the workflow requires, there should be none at launch plan as well
- if workflowVariableMap == nil || len(workflowVariableMap.Variables) == 0 {
+ if workflowVariableMap == nil || len(workflowVariableMap.GetVariables()) == 0 {
if len(defaultInputMap) > 0 {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"invalid launch plan default inputs, expected none but found %d", len(defaultInputMap))
@@ -139,7 +139,7 @@ func checkAndFetchExpectedInputForLaunchPlan(
}, nil
}
- workflowExpectedInputMap = workflowVariableMap.Variables
+ workflowExpectedInputMap = workflowVariableMap.GetVariables()
for name, defaultInput := range defaultInputMap {
value, ok := workflowExpectedInputMap[name]
if !ok {
diff --git a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go
index 7ceb0eae4d..8dee3e3cca 100644
--- a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go
+++ b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go
@@ -20,7 +20,7 @@ const (
var lpApplicationConfig = testutils.GetApplicationConfigWithDefaultDomains()
func getWorkflowInterface() *core.TypedInterface {
- return testutils.GetSampleWorkflowSpecForTest().Template.Interface
+ return testutils.GetSampleWorkflowSpecForTest().GetTemplate().GetInterface()
}
func TestValidateLpEmptyProject(t *testing.T) {
diff --git a/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go b/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go
index e9af05f527..3b8fb6963e 100644
--- a/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go
@@ -13,46 +13,46 @@ import (
var archivableResourceTypes = sets.NewInt32(int32(core.ResourceType_WORKFLOW), int32(core.ResourceType_TASK), int32(core.ResourceType_LAUNCH_PLAN))
func ValidateNamedEntityGetRequest(request *admin.NamedEntityGetRequest) error {
- if err := ValidateResourceType(request.ResourceType); err != nil {
+ if err := ValidateResourceType(request.GetResourceType()); err != nil {
return err
}
- if err := ValidateNamedEntityIdentifier(request.Id); err != nil {
+ if err := ValidateNamedEntityIdentifier(request.GetId()); err != nil {
return err
}
return nil
}
func ValidateNamedEntityUpdateRequest(request *admin.NamedEntityUpdateRequest) error {
- if err := ValidateResourceType(request.ResourceType); err != nil {
+ if err := ValidateResourceType(request.GetResourceType()); err != nil {
return err
}
- if err := ValidateNamedEntityIdentifier(request.Id); err != nil {
+ if err := ValidateNamedEntityIdentifier(request.GetId()); err != nil {
return err
}
- if request.Metadata == nil {
+ if request.GetMetadata() == nil {
return shared.GetMissingArgumentError(shared.Metadata)
}
// Only tasks and workflow resources can be modified from the default state.
- if request.Metadata.State != admin.NamedEntityState_NAMED_ENTITY_ACTIVE &&
- !archivableResourceTypes.Has(int32(request.ResourceType)) {
+ if request.GetMetadata().GetState() != admin.NamedEntityState_NAMED_ENTITY_ACTIVE &&
+ !archivableResourceTypes.Has(int32(request.GetResourceType())) {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "Resource [%s] cannot have its state updated", request.ResourceType.String())
+ "Resource [%s] cannot have its state updated", request.GetResourceType().String())
}
return nil
}
func ValidateNamedEntityListRequest(request *admin.NamedEntityListRequest) error {
- if err := ValidateEmptyStringField(request.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(request.GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(request.GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateResourceType(request.ResourceType); err != nil {
+ if err := ValidateResourceType(request.GetResourceType()); err != nil {
return err
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
diff --git a/flyteadmin/pkg/manager/impl/validation/node_execution_validator.go b/flyteadmin/pkg/manager/impl/validation/node_execution_validator.go
index 9ac920d143..c48dde85cd 100644
--- a/flyteadmin/pkg/manager/impl/validation/node_execution_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/node_execution_validator.go
@@ -11,66 +11,66 @@ func ValidateNodeExecutionIdentifier(identifier *core.NodeExecutionIdentifier) e
if identifier == nil {
return shared.GetMissingArgumentError(shared.ID)
}
- if identifier.ExecutionId == nil {
+ if identifier.GetExecutionId() == nil {
return shared.GetMissingArgumentError(shared.ExecutionID)
}
- if identifier.NodeId == "" {
+ if identifier.GetNodeId() == "" {
return shared.GetMissingArgumentError(shared.NodeID)
}
- return ValidateWorkflowExecutionIdentifier(identifier.ExecutionId)
+ return ValidateWorkflowExecutionIdentifier(identifier.GetExecutionId())
}
// Validates that NodeExecutionEventRequests handled by admin include a valid node execution identifier.
// In the case the event specifies a DynamicWorkflow in the TaskNodeMetadata, this method also validates the contents of
// the dynamic workflow.
func ValidateNodeExecutionEventRequest(request *admin.NodeExecutionEventRequest, maxOutputSizeInBytes int64) error {
- if request.Event == nil {
+ if request.GetEvent() == nil {
return shared.GetMissingArgumentError(shared.Event)
}
- err := ValidateNodeExecutionIdentifier(request.Event.Id)
+ err := ValidateNodeExecutionIdentifier(request.GetEvent().GetId())
if err != nil {
return err
}
- if request.Event.GetTaskNodeMetadata() != nil && request.Event.GetTaskNodeMetadata().DynamicWorkflow != nil {
- dynamicWorkflowNodeMetadata := request.Event.GetTaskNodeMetadata().DynamicWorkflow
- if err := ValidateIdentifier(dynamicWorkflowNodeMetadata.Id, common.Workflow); err != nil {
+ if request.GetEvent().GetTaskNodeMetadata() != nil && request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow() != nil {
+ dynamicWorkflowNodeMetadata := request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow()
+ if err := ValidateIdentifier(dynamicWorkflowNodeMetadata.GetId(), common.Workflow); err != nil {
return err
}
- if dynamicWorkflowNodeMetadata.CompiledWorkflow == nil {
+ if dynamicWorkflowNodeMetadata.GetCompiledWorkflow() == nil {
return shared.GetMissingArgumentError("compiled dynamic workflow")
}
- if dynamicWorkflowNodeMetadata.CompiledWorkflow.Primary == nil {
+ if dynamicWorkflowNodeMetadata.GetCompiledWorkflow().GetPrimary() == nil {
return shared.GetMissingArgumentError("primary dynamic workflow")
}
- if dynamicWorkflowNodeMetadata.CompiledWorkflow.Primary.Template == nil {
+ if dynamicWorkflowNodeMetadata.GetCompiledWorkflow().GetPrimary().GetTemplate() == nil {
return shared.GetMissingArgumentError("primary dynamic workflow template")
}
- if err := ValidateIdentifier(dynamicWorkflowNodeMetadata.CompiledWorkflow.Primary.Template.Id, common.Workflow); err != nil {
+ if err := ValidateIdentifier(dynamicWorkflowNodeMetadata.GetCompiledWorkflow().GetPrimary().GetTemplate().GetId(), common.Workflow); err != nil {
return err
}
}
- if err := ValidateOutputData(request.Event.GetOutputData(), maxOutputSizeInBytes); err != nil {
+ if err := ValidateOutputData(request.GetEvent().GetOutputData(), maxOutputSizeInBytes); err != nil {
return err
}
return nil
}
func ValidateNodeExecutionListRequest(request *admin.NodeExecutionListRequest) error {
- if err := ValidateWorkflowExecutionIdentifier(request.WorkflowExecutionId); err != nil {
+ if err := ValidateWorkflowExecutionIdentifier(request.GetWorkflowExecutionId()); err != nil {
return shared.GetMissingArgumentError(shared.ExecutionID)
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
}
func ValidateNodeExecutionForTaskListRequest(request *admin.NodeExecutionForTaskListRequest) error {
- if err := ValidateTaskExecutionIdentifier(request.TaskExecutionId); err != nil {
+ if err := ValidateTaskExecutionIdentifier(request.GetTaskExecutionId()); err != nil {
return err
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
diff --git a/flyteadmin/pkg/manager/impl/validation/notifications_validator.go b/flyteadmin/pkg/manager/impl/validation/notifications_validator.go
index 8b8f9a68e7..6dfad3a166 100644
--- a/flyteadmin/pkg/manager/impl/validation/notifications_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/notifications_validator.go
@@ -23,22 +23,22 @@ func validateNotifications(notifications []*admin.Notification) error {
for _, notif := range notifications {
switch {
case notif.GetEmail() != nil:
- if err := validateRecipientsEmail(notif.GetEmail().RecipientsEmail); err != nil {
+ if err := validateRecipientsEmail(notif.GetEmail().GetRecipientsEmail()); err != nil {
return err
}
case notif.GetSlack() != nil:
- if err := validateRecipientsEmail(notif.GetSlack().RecipientsEmail); err != nil {
+ if err := validateRecipientsEmail(notif.GetSlack().GetRecipientsEmail()); err != nil {
return err
}
case notif.GetPagerDuty() != nil:
- if err := validateRecipientsEmail(notif.GetPagerDuty().RecipientsEmail); err != nil {
+ if err := validateRecipientsEmail(notif.GetPagerDuty().GetRecipientsEmail()); err != nil {
return err
}
default:
return shared.GetInvalidArgumentError("notification type")
}
- for _, phase := range notif.Phases {
+ for _, phase := range notif.GetPhases() {
if !common.IsExecutionTerminal(phase) {
return shared.GetInvalidArgumentError("phase")
}
diff --git a/flyteadmin/pkg/manager/impl/validation/project_validator.go b/flyteadmin/pkg/manager/impl/validation/project_validator.go
index 76bab900c1..fbdd6a0ca2 100644
--- a/flyteadmin/pkg/manager/impl/validation/project_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/project_validator.go
@@ -21,40 +21,40 @@ const maxDescriptionLength = 300
const maxLabelArrayLength = 16
func ValidateProjectRegisterRequest(request *admin.ProjectRegisterRequest) error {
- if request.Project == nil {
+ if request.GetProject() == nil {
return shared.GetMissingArgumentError(shared.Project)
}
- project := request.Project
- if err := ValidateEmptyStringField(project.Name, projectName); err != nil {
+ project := request.GetProject()
+ if err := ValidateEmptyStringField(project.GetName(), projectName); err != nil {
return err
}
return ValidateProject(project)
}
func ValidateProjectGetRequest(request *admin.ProjectGetRequest) error {
- if err := ValidateEmptyStringField(request.Id, projectID); err != nil {
+ if err := ValidateEmptyStringField(request.GetId(), projectID); err != nil {
return err
}
return nil
}
func ValidateProject(project *admin.Project) error {
- if err := ValidateEmptyStringField(project.Id, projectID); err != nil {
+ if err := ValidateEmptyStringField(project.GetId(), projectID); err != nil {
return err
}
- if err := validateLabels(project.Labels); err != nil {
+ if err := validateLabels(project.GetLabels()); err != nil {
return err
}
- if errs := validation.IsDNS1123Label(project.Id); len(errs) > 0 {
- return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid project id [%s]: %v", project.Id, errs)
+ if errs := validation.IsDNS1123Label(project.GetId()); len(errs) > 0 {
+ return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid project id [%s]: %v", project.GetId(), errs)
}
- if err := ValidateMaxLengthStringField(project.Name, projectName, maxNameLength); err != nil {
+ if err := ValidateMaxLengthStringField(project.GetName(), projectName, maxNameLength); err != nil {
return err
}
- if err := ValidateMaxLengthStringField(project.Description, projectDescription, maxDescriptionLength); err != nil {
+ if err := ValidateMaxLengthStringField(project.GetDescription(), projectDescription, maxDescriptionLength); err != nil {
return err
}
- if project.Domains != nil {
+ if project.GetDomains() != nil {
return errors.NewFlyteAdminError(codes.InvalidArgument,
"Domains are currently only set system wide. Please retry without domains included in your request.")
}
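
The IsDNS1123Label call above returns a list of human-readable problems, which ValidateProject folds into an InvalidArgument error. A small sketch of how it behaves on a few project ids, assuming the usual k8s.io/apimachinery/pkg/util/validation import behind the `validation` alias:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// DNS-1123 labels allow lowercase alphanumerics and '-', up to 63 characters.
	for _, id := range []string{"my-project", "my_project", "MyProject"} {
		if errs := validation.IsDNS1123Label(id); len(errs) > 0 {
			fmt.Printf("invalid project id %q: %v\n", id, errs)
			continue
		}
		fmt.Printf("valid project id %q\n", id)
	}
}
```
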
diff --git a/flyteadmin/pkg/manager/impl/validation/shared_execution.go b/flyteadmin/pkg/manager/impl/validation/shared_execution.go
index 07e2a26fb0..1ee17d1b8c 100644
--- a/flyteadmin/pkg/manager/impl/validation/shared_execution.go
+++ b/flyteadmin/pkg/manager/impl/validation/shared_execution.go
@@ -14,9 +14,9 @@ import (
// ValidateClusterForExecutionID validates that the execution denoted by executionId is recorded as executing on `cluster`.
func ValidateClusterForExecutionID(ctx context.Context, db repoInterfaces.Repository, executionID *core.WorkflowExecutionIdentifier, clusterInEvent string) error {
workflowExecution, err := db.ExecutionRepo().Get(ctx, repoInterfaces.Identifier{
- Project: executionID.Project,
- Domain: executionID.Domain,
- Name: executionID.Name,
+ Project: executionID.GetProject(),
+ Domain: executionID.GetDomain(),
+ Name: executionID.GetName(),
})
if err != nil {
logger.Debugf(ctx, "Failed to find existing execution with id [%+v] with err: %v", executionID, err)
diff --git a/flyteadmin/pkg/manager/impl/validation/signal_validator.go b/flyteadmin/pkg/manager/impl/validation/signal_validator.go
index af1d4425aa..0ba2d3b704 100644
--- a/flyteadmin/pkg/manager/impl/validation/signal_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/signal_validator.go
@@ -15,13 +15,13 @@ import (
)
func ValidateSignalGetOrCreateRequest(ctx context.Context, request *admin.SignalGetOrCreateRequest) error {
- if request.Id == nil {
+ if request.GetId() == nil {
return shared.GetMissingArgumentError("id")
}
- if err := ValidateSignalIdentifier(request.Id); err != nil {
+ if err := ValidateSignalIdentifier(request.GetId()); err != nil {
return err
}
- if request.Type == nil {
+ if request.GetType() == nil {
return shared.GetMissingArgumentError("type")
}
@@ -29,39 +29,39 @@ func ValidateSignalGetOrCreateRequest(ctx context.Context, request *admin.Signal
}
func ValidateSignalIdentifier(identifier *core.SignalIdentifier) error {
- if identifier.ExecutionId == nil {
+ if identifier.GetExecutionId() == nil {
return shared.GetMissingArgumentError(shared.ExecutionID)
}
- if identifier.SignalId == "" {
+ if identifier.GetSignalId() == "" {
return shared.GetMissingArgumentError("signal_id")
}
- return ValidateWorkflowExecutionIdentifier(identifier.ExecutionId)
+ return ValidateWorkflowExecutionIdentifier(identifier.GetExecutionId())
}
func ValidateSignalListRequest(ctx context.Context, request *admin.SignalListRequest) error {
- if err := ValidateWorkflowExecutionIdentifier(request.WorkflowExecutionId); err != nil {
+ if err := ValidateWorkflowExecutionIdentifier(request.GetWorkflowExecutionId()); err != nil {
return shared.GetMissingArgumentError(shared.ExecutionID)
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
}
func ValidateSignalSetRequest(ctx context.Context, db repositoryInterfaces.Repository, request *admin.SignalSetRequest) error {
- if request.Id == nil {
+ if request.GetId() == nil {
return shared.GetMissingArgumentError("id")
}
- if err := ValidateSignalIdentifier(request.Id); err != nil {
+ if err := ValidateSignalIdentifier(request.GetId()); err != nil {
return err
}
- if request.Value == nil {
+ if request.GetValue() == nil {
return shared.GetMissingArgumentError("value")
}
// validate that signal value matches type of existing signal
- signalModel, err := transformers.CreateSignalModel(request.Id, nil, nil)
+ signalModel, err := transformers.CreateSignalModel(request.GetId(), nil, nil)
if err != nil {
return nil
}
@@ -71,7 +71,7 @@ func ValidateSignalSetRequest(ctx context.Context, db repositoryInterfaces.Repos
"failed to validate that signal [%v] exists, err: [%+v]",
signalModel.SignalKey, err)
}
- valueType := propellervalidators.LiteralTypeForLiteral(request.Value)
+ valueType := propellervalidators.LiteralTypeForLiteral(request.GetValue())
lookupSignal, err := transformers.FromSignalModel(lookupSignalModel)
if err != nil {
return err
@@ -80,10 +80,10 @@ func ValidateSignalSetRequest(ctx context.Context, db repositoryInterfaces.Repos
if err != nil {
return errors.NewInvalidLiteralTypeError("", err)
}
- if !propellervalidators.AreTypesCastable(lookupSignal.Type, valueType) {
+ if !propellervalidators.AreTypesCastable(lookupSignal.GetType(), valueType) {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"requested signal value [%v] is not castable to existing signal type [%v]",
- request.Value, lookupSignalModel.Type)
+ request.GetValue(), lookupSignalModel.Type)
}
return nil
diff --git a/flyteadmin/pkg/manager/impl/validation/task_execution_validator.go b/flyteadmin/pkg/manager/impl/validation/task_execution_validator.go
index dfe80541b0..dee4b86c3b 100644
--- a/flyteadmin/pkg/manager/impl/validation/task_execution_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/task_execution_validator.go
@@ -8,20 +8,20 @@ import (
)
func ValidateTaskExecutionRequest(request *admin.TaskExecutionEventRequest, maxOutputSizeInBytes int64) error {
- if request.Event == nil {
+ if request.GetEvent() == nil {
return shared.GetMissingArgumentError(shared.Event)
}
- if request.Event.OccurredAt == nil {
+ if request.GetEvent().GetOccurredAt() == nil {
return shared.GetMissingArgumentError(shared.OccurredAt)
}
- if err := ValidateOutputData(request.Event.GetOutputData(), maxOutputSizeInBytes); err != nil {
+ if err := ValidateOutputData(request.GetEvent().GetOutputData(), maxOutputSizeInBytes); err != nil {
return err
}
return ValidateTaskExecutionIdentifier(&core.TaskExecutionIdentifier{
- TaskId: request.Event.TaskId,
- NodeExecutionId: request.Event.ParentNodeExecutionId,
- RetryAttempt: request.Event.RetryAttempt,
+ TaskId: request.GetEvent().GetTaskId(),
+ NodeExecutionId: request.GetEvent().GetParentNodeExecutionId(),
+ RetryAttempt: request.GetEvent().GetRetryAttempt(),
})
}
@@ -29,19 +29,19 @@ func ValidateTaskExecutionIdentifier(identifier *core.TaskExecutionIdentifier) e
if identifier == nil {
return shared.GetMissingArgumentError(shared.ID)
}
- if identifier.NodeExecutionId == nil {
+ if identifier.GetNodeExecutionId() == nil {
return shared.GetMissingArgumentError(shared.NodeExecutionID)
}
- if err := ValidateNodeExecutionIdentifier(identifier.NodeExecutionId); err != nil {
+ if err := ValidateNodeExecutionIdentifier(identifier.GetNodeExecutionId()); err != nil {
return err
}
- if identifier.TaskId == nil {
+ if identifier.GetTaskId() == nil {
return shared.GetMissingArgumentError(shared.TaskID)
}
- if err := ValidateIdentifier(identifier.TaskId, common.Task); err != nil {
+ if err := ValidateIdentifier(identifier.GetTaskId(), common.Task); err != nil {
return err
}
@@ -49,10 +49,10 @@ func ValidateTaskExecutionIdentifier(identifier *core.TaskExecutionIdentifier) e
}
func ValidateTaskExecutionListRequest(request *admin.TaskExecutionListRequest) error {
- if err := ValidateNodeExecutionIdentifier(request.NodeExecutionId); err != nil {
+ if err := ValidateNodeExecutionIdentifier(request.GetNodeExecutionId()); err != nil {
return err
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
diff --git a/flyteadmin/pkg/manager/impl/validation/task_validator.go b/flyteadmin/pkg/manager/impl/validation/task_validator.go
index 0f0f86fb0b..991048d97e 100644
--- a/flyteadmin/pkg/manager/impl/validation/task_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/task_validator.go
@@ -26,17 +26,17 @@ var whitelistedTaskErr = errors.NewFlyteAdminErrorf(codes.InvalidArgument, "task
// This is called for a task with a non-nil container.
func validateContainer(task *core.TaskTemplate, platformTaskResources workflowengineInterfaces.TaskResources) error {
- if err := ValidateEmptyStringField(task.GetContainer().Image, shared.Image); err != nil {
+ if err := ValidateEmptyStringField(task.GetContainer().GetImage(), shared.Image); err != nil {
return err
}
- if task.GetContainer().Resources == nil {
+ if task.GetContainer().GetResources() == nil {
return nil
}
- if err := validateTaskResources(task.Id, platformTaskResources.Limits, task.GetContainer().Resources.Requests,
- task.GetContainer().Resources.Limits); err != nil {
+ if err := validateTaskResources(task.GetId(), platformTaskResources.Limits, task.GetContainer().GetResources().GetRequests(),
+ task.GetContainer().GetResources().GetLimits()); err != nil {
logger.Debugf(context.Background(), "encountered errors validating task resources for [%+v]: %v",
- task.Id, err)
+ task.GetId(), err)
return err
}
return nil
@@ -44,23 +44,23 @@ func validateContainer(task *core.TaskTemplate, platformTaskResources workflowen
// This is called for a task with a non-nil k8s pod.
func validateK8sPod(task *core.TaskTemplate, platformTaskResources workflowengineInterfaces.TaskResources) error {
- if task.GetK8SPod().PodSpec == nil {
+ if task.GetK8SPod().GetPodSpec() == nil {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"invalid TaskSpecification, pod tasks should specify their target as a K8sPod with a defined pod spec")
}
var podSpec corev1.PodSpec
- if err := utils.UnmarshalStructToObj(task.GetK8SPod().PodSpec, &podSpec); err != nil {
+ if err := utils.UnmarshalStructToObj(task.GetK8SPod().GetPodSpec(), &podSpec); err != nil {
logger.Debugf(context.Background(), "failed to unmarshal k8s podspec [%+v]: %v",
- task.GetK8SPod().PodSpec, err)
+ task.GetK8SPod().GetPodSpec(), err)
return err
}
platformTaskResourceLimits := taskResourceSetToMap(platformTaskResources.Limits)
for _, container := range podSpec.Containers {
- err := validateResource(task.Id, resourceListToQuantity(container.Resources.Requests),
+ err := validateResource(task.GetId(), resourceListToQuantity(container.Resources.Requests),
resourceListToQuantity(container.Resources.Limits), platformTaskResourceLimits)
if err != nil {
logger.Debugf(context.Background(), "encountered errors validating task resources for [%+v]: %v",
- task.Id, err)
+ task.GetId(), err)
return err
}
}
@@ -69,7 +69,7 @@ func validateK8sPod(task *core.TaskTemplate, platformTaskResources workflowengin
}
func validateRuntimeMetadata(metadata *core.RuntimeMetadata) error {
- if err := ValidateEmptyStringField(metadata.Version, shared.RuntimeVersion); err != nil {
+ if err := ValidateEmptyStringField(metadata.GetVersion(), shared.RuntimeVersion); err != nil {
return err
}
return nil
@@ -78,21 +78,21 @@ func validateRuntimeMetadata(metadata *core.RuntimeMetadata) error {
func validateTaskTemplate(taskID *core.Identifier, task *core.TaskTemplate,
platformTaskResources workflowengineInterfaces.TaskResources, whitelistConfig runtime.WhitelistConfiguration) error {
- if err := ValidateEmptyStringField(task.Type, shared.Type); err != nil {
+ if err := ValidateEmptyStringField(task.GetType(), shared.Type); err != nil {
return err
}
- if err := validateTaskType(taskID, task.Type, whitelistConfig); err != nil {
+ if err := validateTaskType(taskID, task.GetType(), whitelistConfig); err != nil {
return err
}
- if task.Metadata == nil {
+ if task.GetMetadata() == nil {
return shared.GetMissingArgumentError(shared.Metadata)
}
- if task.Metadata.Runtime != nil {
- if err := validateRuntimeMetadata(task.Metadata.Runtime); err != nil {
+ if task.GetMetadata().GetRuntime() != nil {
+ if err := validateRuntimeMetadata(task.GetMetadata().GetRuntime()); err != nil {
return err
}
}
- if task.Interface == nil {
+ if task.GetInterface() == nil {
// The actual interface proto has nothing to validate.
return shared.GetMissingArgumentError(shared.TypedInterface)
}
@@ -110,16 +110,16 @@ func ValidateTask(
ctx context.Context, request *admin.TaskCreateRequest, db repositoryInterfaces.Repository,
platformTaskResources workflowengineInterfaces.TaskResources, whitelistConfig runtime.WhitelistConfiguration,
applicationConfig runtime.ApplicationConfiguration) error {
- if err := ValidateIdentifier(request.Id, common.Task); err != nil {
+ if err := ValidateIdentifier(request.GetId(), common.Task); err != nil {
return err
}
- if err := ValidateProjectAndDomain(ctx, db, applicationConfig, request.Id.Project, request.Id.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, applicationConfig, request.GetId().GetProject(), request.GetId().GetDomain()); err != nil {
return err
}
- if request.Spec == nil || request.Spec.Template == nil {
+ if request.GetSpec() == nil || request.GetSpec().GetTemplate() == nil {
return shared.GetMissingArgumentError(shared.Spec)
}
- return validateTaskTemplate(request.Id, request.Spec.Template, platformTaskResources, whitelistConfig)
+ return validateTaskTemplate(request.GetId(), request.GetSpec().GetTemplate(), platformTaskResources, whitelistConfig)
}
func taskResourceSetToMap(
@@ -143,18 +143,18 @@ func taskResourceSetToMap(
func addResourceEntryToMap(
identifier *core.Identifier, entry *core.Resources_ResourceEntry,
resourceEntries *map[core.Resources_ResourceName]resource.Quantity) error {
- if _, ok := (*resourceEntries)[entry.Name]; ok {
+ if _, ok := (*resourceEntries)[entry.GetName()]; ok {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "can't specify %v limit for task [%+v] multiple times", entry.Name, identifier)
+ "can't specify %v limit for task [%+v] multiple times", entry.GetName(), identifier)
}
- quantity, err := resource.ParseQuantity(entry.Value)
+ quantity, err := resource.ParseQuantity(entry.GetValue())
if err != nil {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"Parsing of %v request failed for value %v - reason %v. "+
"Please follow K8s conventions for resources "+
- "https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", entry.Name, entry.Value, err)
+ "https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", entry.GetName(), entry.GetValue(), err)
}
- (*resourceEntries)[entry.Name] = quantity
+ (*resourceEntries)[entry.GetName()] = quantity
return nil
}
@@ -184,7 +184,7 @@ func requestedResourcesToQuantity(
var requestedToQuantity = make(map[core.Resources_ResourceName]resource.Quantity)
for _, limitEntry := range resources {
- switch limitEntry.Name {
+ switch limitEntry.GetName() {
case core.Resources_CPU:
fallthrough
case core.Resources_MEMORY:
@@ -199,7 +199,7 @@ func requestedResourcesToQuantity(
}
if !isWholeNumber(requestedToQuantity[core.Resources_GPU]) {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "gpu for [%+v] must be a whole number, got: %s instead", identifier, limitEntry.Value)
+ "gpu for [%+v] must be a whole number, got: %s instead", identifier, limitEntry.GetValue())
}
case core.Resources_EPHEMERAL_STORAGE:
err := addResourceEntryToMap(identifier, limitEntry, &requestedToQuantity)
@@ -252,15 +252,14 @@ func validateResource(identifier *core.Identifier, requestedResourceDefaults,
if ok && platformLimitOk && limitQuantity.Value() > platformLimit.Value() {
// Also check that the requested limit is less than the platform task limit.
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "Requested %v limit [%v] is greater than current limit set in the platform configuration"+
- " [%v]. Please contact Flyte Admins to change these limits or consult the configuration",
+ "Requested %v limit [%v] is greater than current limit set in the platform configuration [%v]. Please contact Flyte Admins to change these limits or consult the configuration",
resourceName, limitQuantity.String(), platformLimit.String())
}
if platformLimitOk && defaultQuantity.Value() > platformTaskResourceLimits[resourceName].Value() {
// Also check that the requested limit is less than the platform task limit.
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "Requested %v default [%v] is greater than current limit set in the platform configuration"+
- " [%v]. Please contact Flyte Admins to change these limits or consult the configuration",
+ "Requested %v default [%v] is greater than current limit set in the platform configuration [%v]. Please contact Flyte Admins to change these limits or consult the configuration",
resourceName, defaultQuantity.String(), platformTaskResourceLimits[resourceName].String())
}
case core.Resources_GPU:
@@ -273,8 +272,7 @@ func validateResource(identifier *core.Identifier, requestedResourceDefaults,
platformLimit, platformLimitOk := platformTaskResourceLimits[resourceName]
if platformLimitOk && defaultQuantity.Value() > platformLimit.Value() {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "Requested %v default [%v] is greater than current limit set in the platform configuration"+
- " [%v]. Please contact Flyte Admins to change these limits or consult the configuration",
+ "Requested %v default [%v] is greater than current limit set in the platform configuration [%v]. Please contact Flyte Admins to change these limits or consult the configuration",
resourceName, defaultQuantity.String(), platformLimit.String())
}
}
@@ -295,14 +293,14 @@ func validateTaskType(taskID *core.Identifier, taskType string, whitelistConfig
if scope.Project == "" {
// All projects whitelisted
return nil
- } else if scope.Project != taskID.Project {
+ } else if scope.Project != taskID.GetProject() {
continue
}
// We have a potential match! Verify that this task type is approved given the specificity of the whitelist.
if scope.Domain == "" {
// All domains for this project are whitelisted
return nil
- } else if scope.Domain == taskID.Domain {
+ } else if scope.Domain == taskID.GetDomain() {
return nil
}
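
The resource checks above lean on resource.Quantity (presumably k8s.io/apimachinery/pkg/api/resource): requested values are parsed with ParseQuantity and compared against the platform limits, and GPU requests must additionally be whole numbers. A hedged sketch of those two checks; the isWholeNumber helper here is a local illustration, not the admin implementation:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// isWholeNumber reports whether a quantity has no fractional part, mirroring
// the constraint that GPU requests must be whole numbers.
func isWholeNumber(q resource.Quantity) bool {
	return q.MilliValue()%1000 == 0
}

func main() {
	requested := resource.MustParse("1500m") // 1.5 units
	limit := resource.MustParse("2")

	fmt.Println("within platform limit:", requested.Cmp(limit) <= 0)
	fmt.Println("whole number (required for gpu):", isWholeNumber(requested))
}
```
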
diff --git a/flyteadmin/pkg/manager/impl/validation/validation.go b/flyteadmin/pkg/manager/impl/validation/validation.go
index de2927495c..03bc8f963d 100644
--- a/flyteadmin/pkg/manager/impl/validation/validation.go
+++ b/flyteadmin/pkg/manager/impl/validation/validation.go
@@ -50,10 +50,10 @@ func ValidateMaxMapLengthField(m map[string]string, fieldName string, limit int)
}
func validateLabels(labels *admin.Labels) error {
- if labels == nil || len(labels.Values) == 0 {
+ if labels == nil || len(labels.GetValues()) == 0 {
return nil
}
- if err := ValidateMaxMapLengthField(labels.Values, "labels", maxLabelArrayLength); err != nil {
+ if err := ValidateMaxMapLengthField(labels.GetValues(), "labels", maxLabelArrayLength); err != nil {
return err
}
if err := validateLabelsAlphanumeric(labels); err != nil {
@@ -65,7 +65,7 @@ func validateLabels(labels *admin.Labels) error {
// Given an admin.Labels, checks if the labels exist or not and if it does, checks if the labels are K8s compliant,
// i.e. alphanumeric + - and _
func validateLabelsAlphanumeric(labels *admin.Labels) error {
- for key, value := range labels.Values {
+ for key, value := range labels.GetValues() {
if errs := validation.IsQualifiedName(key); len(errs) > 0 {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid label key [%s]: %v", key, errs)
}
@@ -80,16 +80,16 @@ func ValidateIdentifierFieldsSet(id *core.Identifier) error {
if id == nil {
return shared.GetMissingArgumentError(shared.ID)
}
- if err := ValidateEmptyStringField(id.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(id.GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(id.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(id.GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateEmptyStringField(id.Name, shared.Name); err != nil {
+ if err := ValidateEmptyStringField(id.GetName(), shared.Name); err != nil {
return err
}
- if err := ValidateEmptyStringField(id.Version, shared.Version); err != nil {
+ if err := ValidateEmptyStringField(id.GetVersion(), shared.Version); err != nil {
return err
}
return nil
@@ -100,10 +100,10 @@ func ValidateIdentifier(id *core.Identifier, expectedType common.Entity) error {
if id == nil {
return shared.GetMissingArgumentError(shared.ID)
}
- if entityToResourceType[expectedType] != id.ResourceType {
+ if entityToResourceType[expectedType] != id.GetResourceType() {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"unexpected resource type %s for identifier [%+v], expected %s instead",
- strings.ToLower(id.ResourceType.String()), id, strings.ToLower(entityToResourceType[expectedType].String()))
+ strings.ToLower(id.GetResourceType().String()), id, strings.ToLower(entityToResourceType[expectedType].String()))
}
return ValidateIdentifierFieldsSet(id)
}
@@ -113,13 +113,13 @@ func ValidateNamedEntityIdentifier(id *admin.NamedEntityIdentifier) error {
if id == nil {
return shared.GetMissingArgumentError(shared.ID)
}
- if err := ValidateEmptyStringField(id.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(id.GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(id.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(id.GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateEmptyStringField(id.Name, shared.Name); err != nil {
+ if err := ValidateEmptyStringField(id.GetName(), shared.Name); err != nil {
return err
}
return nil
@@ -144,92 +144,92 @@ func ValidateVersion(version string) error {
}
func ValidateResourceListRequest(request *admin.ResourceListRequest) error {
- if request.Id == nil {
+ if request.GetId() == nil {
return shared.GetMissingArgumentError(shared.ID)
}
- if err := ValidateEmptyStringField(request.Id.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(request.GetId().GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Id.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(request.GetId().GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
}
func ValidateDescriptionEntityListRequest(request *admin.DescriptionEntityListRequest) error {
- if request.Id == nil {
+ if request.GetId() == nil {
return shared.GetMissingArgumentError(shared.ID)
}
- if err := ValidateEmptyStringField(request.Id.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(request.GetId().GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Id.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(request.GetId().GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Id.Name, shared.Name); err != nil {
+ if err := ValidateEmptyStringField(request.GetId().GetName(), shared.Name); err != nil {
return err
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
}
func ValidateActiveLaunchPlanRequest(request *admin.ActiveLaunchPlanRequest) error {
- if err := ValidateEmptyStringField(request.Id.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(request.GetId().GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Id.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(request.GetId().GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Id.Name, shared.Name); err != nil {
+ if err := ValidateEmptyStringField(request.GetId().GetName(), shared.Name); err != nil {
return err
}
return nil
}
func ValidateActiveLaunchPlanListRequest(request *admin.ActiveLaunchPlanListRequest) error {
- if err := ValidateEmptyStringField(request.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(request.GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(request.GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
}
func ValidateNamedEntityIdentifierListRequest(request *admin.NamedEntityIdentifierListRequest) error {
- if err := ValidateEmptyStringField(request.Project, shared.Project); err != nil {
+ if err := ValidateEmptyStringField(request.GetProject(), shared.Project); err != nil {
return err
}
- if err := ValidateEmptyStringField(request.Domain, shared.Domain); err != nil {
+ if err := ValidateEmptyStringField(request.GetDomain(), shared.Domain); err != nil {
return err
}
- if err := ValidateLimit(request.Limit); err != nil {
+ if err := ValidateLimit(request.GetLimit()); err != nil {
return err
}
return nil
}
func ValidateDescriptionEntityGetRequest(request *admin.ObjectGetRequest) error {
- if err := ValidateResourceType(request.Id.ResourceType); err != nil {
+ if err := ValidateResourceType(request.GetId().GetResourceType()); err != nil {
return err
}
- if err := ValidateIdentifierFieldsSet(request.Id); err != nil {
+ if err := ValidateIdentifierFieldsSet(request.GetId()); err != nil {
return err
}
return nil
}
func validateLiteralMap(inputMap *core.LiteralMap, fieldName string) error {
- if inputMap != nil && len(inputMap.Literals) > 0 {
- for name, fixedInput := range inputMap.Literals {
+ if inputMap != nil && len(inputMap.GetLiterals()) > 0 {
+ for name, fixedInput := range inputMap.GetLiterals() {
if name == "" {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "missing key in %s", fieldName)
}
@@ -251,8 +251,8 @@ func validateParameterMapAllowArtifacts(inputMap *core.ParameterMap, fieldName s
}
func validateParameterMapDisableArtifacts(inputMap *core.ParameterMap, fieldName string) error {
- if inputMap != nil && len(inputMap.Parameters) > 0 {
- for name, defaultInput := range inputMap.Parameters {
+ if inputMap != nil && len(inputMap.GetParameters()) > 0 {
+ for name, defaultInput := range inputMap.GetParameters() {
if defaultInput.GetArtifactQuery() != nil {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "artifact mode not enabled but query found %s %s", fieldName, name)
}
@@ -262,8 +262,8 @@ func validateParameterMapDisableArtifacts(inputMap *core.ParameterMap, fieldName
}
func validateParameterMap(inputMap *core.ParameterMap, fieldName string) error {
- if inputMap != nil && len(inputMap.Parameters) > 0 {
- for name, defaultInput := range inputMap.Parameters {
+ if inputMap != nil && len(inputMap.GetParameters()) > 0 {
+ for name, defaultInput := range inputMap.GetParameters() {
if name == "" {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "missing key in %s", fieldName)
}
@@ -347,7 +347,7 @@ func ValidateDatetime(literal *core.Literal) error {
err := timestamp.CheckValid()
if err != nil {
- return errors.NewFlyteAdminErrorf(codes.InvalidArgument, err.Error())
+ return errors.NewFlyteAdminErrorf(codes.InvalidArgument, err.Error()) //nolint
}
return nil
}
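
The //nolint added to the ValidateDatetime error above silences the printf-style vet finding: err.Error() is a non-constant format string, so any '%' in the message would be re-interpreted as a verb. A tiny sketch of the hazard, with fmt.Errorf standing in for NewFlyteAdminErrorf:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("value exceeds 100%")

	fmt.Println(fmt.Errorf(err.Error()))       // mangled: value exceeds 100%!(NOVERB)
	fmt.Println(fmt.Errorf("%s", err.Error())) // intact:  value exceeds 100%
}
```

Passing the message through an explicit "%s" format is the usual lint-clean alternative to suppressing the check.
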
diff --git a/flyteadmin/pkg/manager/impl/validation/workflow_validator.go b/flyteadmin/pkg/manager/impl/validation/workflow_validator.go
index d5d2681375..7a5f36e78b 100644
--- a/flyteadmin/pkg/manager/impl/validation/workflow_validator.go
+++ b/flyteadmin/pkg/manager/impl/validation/workflow_validator.go
@@ -22,13 +22,13 @@ const numSystemNodes = 2 // A workflow graph always has a start and end node inj
func ValidateWorkflow(
ctx context.Context, request *admin.WorkflowCreateRequest, db repositoryInterfaces.Repository,
config runtime.ApplicationConfiguration) error {
- if err := ValidateIdentifier(request.Id, common.Workflow); err != nil {
+ if err := ValidateIdentifier(request.GetId(), common.Workflow); err != nil {
return err
}
- if err := ValidateProjectAndDomain(ctx, db, config, request.Id.Project, request.Id.Domain); err != nil {
+ if err := ValidateProjectAndDomain(ctx, db, config, request.GetId().GetProject(), request.GetId().GetDomain()); err != nil {
return err
}
- if request.Spec == nil || request.Spec.Template == nil {
+ if request.GetSpec() == nil || request.GetSpec().GetTemplate() == nil {
return shared.GetMissingArgumentError(shared.Spec)
}
return nil
@@ -47,12 +47,12 @@ func ValidateCompiledWorkflow(identifier *core.Identifier, workflow *admin.Workf
// Treat this is unset. There is no limit to compare against.
return nil
}
- if workflow.CompiledWorkflow == nil || workflow.CompiledWorkflow.Primary == nil ||
- workflow.CompiledWorkflow.Primary.Template == nil || workflow.CompiledWorkflow.Primary.Template.Nodes == nil {
+ if workflow.GetCompiledWorkflow() == nil || workflow.GetCompiledWorkflow().GetPrimary() == nil ||
+ workflow.GetCompiledWorkflow().GetPrimary().GetTemplate() == nil || workflow.GetCompiledWorkflow().GetPrimary().GetTemplate().GetNodes() == nil {
logger.Warningf(context.Background(), "workflow [%+v] did not have any primary nodes", identifier)
return nil
}
- numUserNodes := len(workflow.CompiledWorkflow.Primary.Template.Nodes) - numSystemNodes
+ numUserNodes := len(workflow.GetCompiledWorkflow().GetPrimary().GetTemplate().GetNodes()) - numSystemNodes
if numUserNodes > config.GetWorkflowNodeLimit() {
return errors.NewFlyteAdminErrorf(codes.InvalidArgument,
"number of nodes in workflow [%+v] exceeds limit (%v > %v)", identifier,
diff --git a/flyteadmin/pkg/manager/impl/version_manager_test.go b/flyteadmin/pkg/manager/impl/version_manager_test.go
index 5cea4a0b15..7b5b5c9bac 100644
--- a/flyteadmin/pkg/manager/impl/version_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/version_manager_test.go
@@ -24,7 +24,7 @@ func TestVersionManager_GetVersion(t *testing.T) {
v, err := vmanager.GetVersion(context.Background(), &admin.GetVersionRequest{})
assert.Nil(t, err)
- assert.Equal(t, v.ControlPlaneVersion.BuildTime, buildTime)
- assert.Equal(t, v.ControlPlaneVersion.Build, build)
- assert.Equal(t, v.ControlPlaneVersion.Version, appversion)
+ assert.Equal(t, v.GetControlPlaneVersion().GetBuildTime(), buildTime)
+ assert.Equal(t, v.GetControlPlaneVersion().GetBuild(), build)
+ assert.Equal(t, v.GetControlPlaneVersion().GetVersion(), appversion)
}
diff --git a/flyteadmin/pkg/manager/impl/workflow_manager.go b/flyteadmin/pkg/manager/impl/workflow_manager.go
index d3bfdc67dd..b99de8773f 100644
--- a/flyteadmin/pkg/manager/impl/workflow_manager.go
+++ b/flyteadmin/pkg/manager/impl/workflow_manager.go
@@ -48,26 +48,26 @@ type WorkflowManager struct {
}
func getWorkflowContext(ctx context.Context, identifier *core.Identifier) context.Context {
- ctx = contextutils.WithProjectDomain(ctx, identifier.Project, identifier.Domain)
- return contextutils.WithWorkflowID(ctx, identifier.Name)
+ ctx = contextutils.WithProjectDomain(ctx, identifier.GetProject(), identifier.GetDomain())
+ return contextutils.WithWorkflowID(ctx, identifier.GetName())
}
func (w *WorkflowManager) setDefaults(request *admin.WorkflowCreateRequest) (*admin.WorkflowCreateRequest, error) {
// TODO: Also add environment and configuration defaults once those have been determined.
- if request.Id == nil {
+ if request.GetId() == nil {
return request, errors.NewFlyteAdminError(codes.InvalidArgument, "missing identifier for WorkflowCreateRequest")
}
- request.Spec.Template.Id = request.Id
+ request.Spec.Template.Id = request.GetId()
return request, nil
}
func (w *WorkflowManager) getCompiledWorkflow(
ctx context.Context, request *admin.WorkflowCreateRequest) (*admin.WorkflowClosure, error) {
- reqs, err := w.compiler.GetRequirements(request.Spec.Template, request.Spec.SubWorkflows)
+ reqs, err := w.compiler.GetRequirements(request.GetSpec().GetTemplate(), request.GetSpec().GetSubWorkflows())
if err != nil {
w.metrics.CompilationFailures.Inc()
logger.Errorf(ctx, "Failed to get workflow requirements for template [%+v] with err %v",
- request.Spec.Template, err)
+ request.GetSpec().GetTemplate(), err)
return &admin.WorkflowClosure{}, err
}
@@ -76,10 +76,10 @@ func (w *WorkflowManager) getCompiledWorkflow(
task, err := util.GetTask(ctx, w.db, taskID)
if err != nil {
logger.Debugf(ctx, "Failed to get task with id [%+v] when compiling workflow with id [%+v] with err %v",
- taskID, request.Id, err)
+ taskID, request.GetId(), err)
return &admin.WorkflowClosure{}, err
}
- tasks[idx] = task.Closure.CompiledTask
+ tasks[idx] = task.GetClosure().GetCompiledTask()
}
var launchPlans = make([]compiler.InterfaceProvider, len(reqs.GetRequiredLaunchPlanIds()))
@@ -88,7 +88,7 @@ func (w *WorkflowManager) getCompiledWorkflow(
launchPlanModel, err = util.GetLaunchPlanModel(ctx, w.db, launchPlanID)
if err != nil {
logger.Debugf(ctx, "Failed to get launch plan with id [%+v] when compiling workflow with id [%+v] with err %v",
- launchPlanID, request.Id, err)
+ launchPlanID, request.GetId(), err)
return &admin.WorkflowClosure{}, err
}
var launchPlanInterfaceProvider workflowengine.InterfaceProvider
@@ -101,16 +101,16 @@ func (w *WorkflowManager) getCompiledWorkflow(
launchPlans[idx] = launchPlanInterfaceProvider
}
- closure, err := w.compiler.CompileWorkflow(request.Spec.Template, request.Spec.SubWorkflows, tasks, launchPlans)
+ closure, err := w.compiler.CompileWorkflow(request.GetSpec().GetTemplate(), request.GetSpec().GetSubWorkflows(), tasks, launchPlans)
if err != nil {
w.metrics.CompilationFailures.Inc()
- logger.Debugf(ctx, "Failed to compile workflow with id [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to compile workflow with id [%+v] with err %v", request.GetId(), err)
return &admin.WorkflowClosure{}, err
}
createdAt, err := ptypes.TimestampProto(time.Now())
if err != nil {
return &admin.WorkflowClosure{}, errors.NewFlyteAdminErrorf(codes.Internal,
- "Failed to serialize CreatedAt: %v when saving compiled workflow %+v", err, request.Id)
+ "Failed to serialize CreatedAt: %v when saving compiled workflow %+v", err, request.GetId())
}
return &admin.WorkflowClosure{
CompiledWorkflow: closure,
@@ -121,10 +121,10 @@ func (w *WorkflowManager) getCompiledWorkflow(
func (w *WorkflowManager) createDataReference(
ctx context.Context, identifier *core.Identifier) (storage.DataReference, error) {
nestedSubKeys := []string{
- identifier.Project,
- identifier.Domain,
- identifier.Name,
- identifier.Version,
+ identifier.GetProject(),
+ identifier.GetDomain(),
+ identifier.GetName(),
+ identifier.GetVersion(),
}
nestedKeys := append(w.storagePrefix, nestedSubKeys...)
return w.storageClient.ConstructReference(ctx, w.storageClient.GetBaseContainerFQN(ctx), nestedKeys...)
@@ -136,10 +136,10 @@ func (w *WorkflowManager) CreateWorkflow(
if err := validation.ValidateWorkflow(ctx, request, w.db, w.config.ApplicationConfiguration()); err != nil {
return nil, err
}
- ctx = getWorkflowContext(ctx, request.Id)
+ ctx = getWorkflowContext(ctx, request.GetId())
finalizedRequest, err := w.setDefaults(request)
if err != nil {
- logger.Debugf(ctx, "Failed to set defaults for workflow with id [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to set defaults for workflow with id [%+v] with err %v", request.GetId(), err)
return nil, err
}
// Validate that the workflow compiles.
@@ -147,21 +147,21 @@ func (w *WorkflowManager) CreateWorkflow(
if err != nil {
logger.Errorf(ctx, "Failed to compile workflow with err: %v", err)
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "failed to compile workflow for [%+v] with err: %v", request.Id, err)
+ "failed to compile workflow for [%+v] with err: %v", request.GetId(), err)
}
err = validation.ValidateCompiledWorkflow(
- request.Id, workflowClosure, w.config.RegistrationValidationConfiguration())
+ request.GetId(), workflowClosure, w.config.RegistrationValidationConfiguration())
if err != nil {
return nil, err
}
- workflowDigest, err := util.GetWorkflowDigest(ctx, workflowClosure.CompiledWorkflow)
+ workflowDigest, err := util.GetWorkflowDigest(ctx, workflowClosure.GetCompiledWorkflow())
if err != nil {
logger.Errorf(ctx, "failed to compute workflow digest with err %v", err)
return nil, err
}
// Assert that a matching workflow doesn't already exist before uploading the workflow closure.
- existingWorkflowModel, err := util.GetWorkflowModel(ctx, w.db, request.Id)
+ existingWorkflowModel, err := util.GetWorkflowModel(ctx, w.db, request.GetId())
// Check that no identical or conflicting workflows exist.
if err == nil {
// A workflow's structure is uniquely defined by its collection of nodes.
@@ -174,29 +174,29 @@ func (w *WorkflowManager) CreateWorkflow(
return nil, transformerErr
}
// A workflow exists with different structure
- return nil, errors.NewWorkflowExistsDifferentStructureError(ctx, request, existingWorkflow.Closure.GetCompiledWorkflow(), workflowClosure.GetCompiledWorkflow())
+ return nil, errors.NewWorkflowExistsDifferentStructureError(ctx, request, existingWorkflow.GetClosure().GetCompiledWorkflow(), workflowClosure.GetCompiledWorkflow())
} else if flyteAdminError, ok := err.(errors.FlyteAdminError); !ok || flyteAdminError.Code() != codes.NotFound {
logger.Debugf(ctx, "Failed to get workflow for comparison in CreateWorkflow with ID [%+v] with err %v",
- request.Id, err)
+ request.GetId(), err)
return nil, err
}
- remoteClosureDataRef, err := w.createDataReference(ctx, request.Spec.Template.Id)
+ remoteClosureDataRef, err := w.createDataReference(ctx, request.GetSpec().GetTemplate().GetId())
if err != nil {
logger.Infof(ctx, "failed to construct data reference for workflow closure with id [%+v] with err %v",
- request.Id, err)
+ request.GetId(), err)
return nil, errors.NewFlyteAdminErrorf(codes.Internal,
- "failed to construct data reference for workflow closure with id [%+v] and err %v", request.Id, err)
+ "failed to construct data reference for workflow closure with id [%+v] and err %v", request.GetId(), err)
}
err = w.storageClient.WriteProtobuf(ctx, remoteClosureDataRef, defaultStorageOptions, workflowClosure)
if err != nil {
logger.Infof(ctx,
"failed to write marshaled workflow with id [%+v] to storage %s with err %v and base container: %s",
- request.Id, remoteClosureDataRef.String(), err, w.storageClient.GetBaseContainerFQN(ctx))
+ request.GetId(), remoteClosureDataRef.String(), err, w.storageClient.GetBaseContainerFQN(ctx))
return nil, errors.NewFlyteAdminErrorf(codes.Internal,
"failed to write marshaled workflow [%+v] to storage %s with err %v and base container: %s",
- request.Id, remoteClosureDataRef.String(), err, w.storageClient.GetBaseContainerFQN(ctx))
+ request.GetId(), remoteClosureDataRef.String(), err, w.storageClient.GetBaseContainerFQN(ctx))
}
// Save the workflow & its reference to the offloaded, compiled workflow in the database.
workflowModel, err := transformers.CreateWorkflowModel(
@@ -207,17 +207,17 @@ func (w *WorkflowManager) CreateWorkflow(
finalizedRequest, remoteClosureDataRef.String(), err)
return nil, err
}
- descriptionModel, err := transformers.CreateDescriptionEntityModel(request.Spec.Description, request.Id)
+ descriptionModel, err := transformers.CreateDescriptionEntityModel(request.GetSpec().GetDescription(), request.GetId())
if err != nil {
logger.Errorf(ctx,
- "Failed to transform description model [%+v] with err: %v", request.Spec.Description, err)
+ "Failed to transform description model [%+v] with err: %v", request.GetSpec().GetDescription(), err)
return nil, err
}
if descriptionModel != nil {
workflowModel.ShortDescription = descriptionModel.ShortDescription
}
if err = w.db.WorkflowRepo().Create(ctx, workflowModel, descriptionModel); err != nil {
- logger.Infof(ctx, "Failed to create workflow model [%+v] with err %v", request.Id, err)
+ logger.Infof(ctx, "Failed to create workflow model [%+v] with err %v", request.GetId(), err)
return nil, err
}
w.metrics.TypedInterfaceSizeBytes.Observe(float64(len(workflowModel.TypedInterface)))
@@ -226,14 +226,14 @@ func (w *WorkflowManager) CreateWorkflow(
}
func (w *WorkflowManager) GetWorkflow(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Workflow, error) {
- if err := validation.ValidateIdentifier(request.Id, common.Workflow); err != nil {
- logger.Debugf(ctx, "invalid identifier [%+v]: %v", request.Id, err)
+ if err := validation.ValidateIdentifier(request.GetId(), common.Workflow); err != nil {
+ logger.Debugf(ctx, "invalid identifier [%+v]: %v", request.GetId(), err)
return nil, err
}
- ctx = getWorkflowContext(ctx, request.Id)
- workflow, err := util.GetWorkflow(ctx, w.db, w.storageClient, request.Id)
+ ctx = getWorkflowContext(ctx, request.GetId())
+ workflow, err := util.GetWorkflow(ctx, w.db, w.storageClient, request.GetId())
if err != nil {
- logger.Infof(ctx, "Failed to get workflow with id [%+v] with err %v", request.Id, err)
+ logger.Infof(ctx, "Failed to get workflow with id [%+v] with err %v", request.GetId(), err)
return nil, err
}
return workflow, nil
@@ -246,37 +246,37 @@ func (w *WorkflowManager) ListWorkflows(
if err := validation.ValidateResourceListRequest(request); err != nil {
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
- ctx = contextutils.WithWorkflowID(ctx, request.Id.Name)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetId().GetProject(), request.GetId().GetDomain())
+ ctx = contextutils.WithWorkflowID(ctx, request.GetId().GetName())
filters, err := util.GetDbFilters(util.FilterSpec{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
- RequestFilters: request.Filters,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
+ RequestFilters: request.GetFilters(),
}, common.Workflow)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.WorkflowColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.WorkflowColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListWorkflows", request.Token)
+ "invalid pagination token %s for ListWorkflows", request.GetToken())
}
listWorkflowsInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
}
output, err := w.db.WorkflowRepo().List(ctx, listWorkflowsInput)
if err != nil {
- logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.Id, err)
+ logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.GetId(), err)
return nil, err
}
workflowList, err := transformers.FromWorkflowModels(output.Workflows)
@@ -286,7 +286,7 @@ func (w *WorkflowManager) ListWorkflows(
return nil, err
}
var token string
- if len(output.Workflows) == int(request.Limit) {
+ if len(output.Workflows) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(output.Workflows))
}
return &admin.WorkflowList{
@@ -301,28 +301,28 @@ func (w *WorkflowManager) ListWorkflowIdentifiers(ctx context.Context, request *
logger.Debugf(ctx, "invalid request [%+v]: %v", request, err)
return nil, err
}
- ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
+ ctx = contextutils.WithProjectDomain(ctx, request.GetProject(), request.GetDomain())
filters, err := util.GetDbFilters(util.FilterSpec{
- Project: request.Project,
- Domain: request.Domain,
+ Project: request.GetProject(),
+ Domain: request.GetDomain(),
}, common.Workflow)
if err != nil {
return nil, err
}
- sortParameter, err := common.NewSortParameter(request.SortBy, models.WorkflowColumns)
+ sortParameter, err := common.NewSortParameter(request.GetSortBy(), models.WorkflowColumns)
if err != nil {
return nil, err
}
- offset, err := validation.ValidateToken(request.Token)
+ offset, err := validation.ValidateToken(request.GetToken())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
- "invalid pagination token %s for ListWorkflowIdentifiers", request.Token)
+ "invalid pagination token %s for ListWorkflowIdentifiers", request.GetToken())
}
listWorkflowsInput := repoInterfaces.ListResourceInput{
- Limit: int(request.Limit),
+ Limit: int(request.GetLimit()),
Offset: offset,
InlineFilters: filters,
SortParameter: sortParameter,
@@ -331,12 +331,12 @@ func (w *WorkflowManager) ListWorkflowIdentifiers(ctx context.Context, request *
output, err := w.db.WorkflowRepo().ListIdentifiers(ctx, listWorkflowsInput)
if err != nil {
logger.Debugf(ctx, "Failed to list workflow ids with project: %s and domain: %s with err %v",
- request.Project, request.Domain, err)
+ request.GetProject(), request.GetDomain(), err)
return nil, err
}
var token string
- if len(output.Workflows) == int(request.Limit) {
+ if len(output.Workflows) == int(request.GetLimit()) {
token = strconv.Itoa(offset + len(output.Workflows))
}
entities := transformers.FromWorkflowModelsToIdentifiers(output.Workflows)
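
Both listing paths above use the same offset-based pagination: a next-page token is issued only when the page came back full, and the token is simply the next offset rendered as a string. A compact sketch of that rule; nextToken is an illustrative helper, not a function in the codebase:

```go
package main

import (
	"fmt"
	"strconv"
)

// nextToken mirrors the ListWorkflows pattern: offset is where this page
// started, returned is how many rows came back, limit is the page size.
func nextToken(offset, returned, limit int) string {
	if returned == limit {
		return strconv.Itoa(offset + returned)
	}
	return "" // a short page means no further results, so no token
}

func main() {
	fmt.Printf("full page: %q\n", nextToken(0, 100, 100))  // "100"
	fmt.Printf("last page: %q\n", nextToken(100, 37, 100)) // ""
}
```
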
diff --git a/flyteadmin/pkg/manager/impl/workflow_manager_test.go b/flyteadmin/pkg/manager/impl/workflow_manager_test.go
index 8072453bbd..280624a956 100644
--- a/flyteadmin/pkg/manager/impl/workflow_manager_test.go
+++ b/flyteadmin/pkg/manager/impl/workflow_manager_test.go
@@ -128,7 +128,7 @@ func TestSetWorkflowDefaults(t *testing.T) {
request := testutils.GetWorkflowRequest()
finalizedRequest, err := workflowManager.(*WorkflowManager).setDefaults(request)
assert.NoError(t, err)
- assert.True(t, proto.Equal(workflowIdentifier, finalizedRequest.Spec.Template.Id))
+ assert.True(t, proto.Equal(workflowIdentifier, finalizedRequest.GetSpec().GetTemplate().GetId()))
}
func TestCreateWorkflow(t *testing.T) {
@@ -309,12 +309,12 @@ func TestGetWorkflow(t *testing.T) {
Id: workflowIdentifier,
})
assert.NoError(t, err)
- assert.Equal(t, "project", workflow.Id.Project)
- assert.Equal(t, "domain", workflow.Id.Domain)
- assert.Equal(t, "name", workflow.Id.Name)
- assert.Equal(t, "version", workflow.Id.Version)
- assert.True(t, proto.Equal(testutils.GetWorkflowClosure(), workflow.Closure),
- "%+v !=\n %+v", testutils.GetWorkflowClosure(), workflow.Closure)
+ assert.Equal(t, "project", workflow.GetId().GetProject())
+ assert.Equal(t, "domain", workflow.GetId().GetDomain())
+ assert.Equal(t, "name", workflow.GetId().GetName())
+ assert.Equal(t, "version", workflow.GetId().GetVersion())
+ assert.True(t, proto.Equal(testutils.GetWorkflowClosure(), workflow.GetClosure()),
+ "%+v !=\n %+v", testutils.GetWorkflowClosure(), workflow.GetClosure())
}
func TestGetWorkflow_DatabaseError(t *testing.T) {
@@ -450,13 +450,13 @@ func TestListWorkflows(t *testing.T) {
})
assert.NoError(t, err)
assert.NotNil(t, workflowList)
- assert.Len(t, workflowList.Workflows, 2)
+ assert.Len(t, workflowList.GetWorkflows(), 2)
- for idx, workflow := range workflowList.Workflows {
- assert.Equal(t, projectValue, workflow.Id.Project)
- assert.Equal(t, domainValue, workflow.Id.Domain)
- assert.Equal(t, nameValue, workflow.Id.Name)
- assert.Equal(t, fmt.Sprintf("version %v", idx), workflow.Id.Version)
+ for idx, workflow := range workflowList.GetWorkflows() {
+ assert.Equal(t, projectValue, workflow.GetId().GetProject())
+ assert.Equal(t, domainValue, workflow.GetId().GetDomain())
+ assert.Equal(t, nameValue, workflow.GetId().GetName())
+ assert.Equal(t, fmt.Sprintf("version %v", idx), workflow.GetId().GetVersion())
assert.True(t, proto.Equal(&admin.WorkflowClosure{
CreatedAt: testutils.MockCreatedAtProto,
CompiledWorkflow: &core.CompiledWorkflowClosure{
@@ -466,9 +466,9 @@ func TestListWorkflows(t *testing.T) {
},
},
},
- }, workflow.Closure))
+ }, workflow.GetClosure()))
}
- assert.Empty(t, workflowList.Token)
+ assert.Empty(t, workflowList.GetToken())
}
func TestListWorkflows_MissingParameters(t *testing.T) {
@@ -584,11 +584,11 @@ func TestWorkflowManager_ListWorkflowIdentifiers(t *testing.T) {
})
assert.NoError(t, err)
assert.NotNil(t, workflowList)
- assert.Len(t, workflowList.Entities, 2)
+ assert.Len(t, workflowList.GetEntities(), 2)
- for _, entity := range workflowList.Entities {
- assert.Equal(t, projectValue, entity.Project)
- assert.Equal(t, domainValue, entity.Domain)
- assert.Equal(t, nameValue, entity.Name)
+ for _, entity := range workflowList.GetEntities() {
+ assert.Equal(t, projectValue, entity.GetProject())
+ assert.Equal(t, domainValue, entity.GetDomain())
+ assert.Equal(t, nameValue, entity.GetName())
}
}
diff --git a/flyteadmin/pkg/repositories/gormimpl/common.go b/flyteadmin/pkg/repositories/gormimpl/common.go
index b103ef0e43..7f4d4f370a 100644
--- a/flyteadmin/pkg/repositories/gormimpl/common.go
+++ b/flyteadmin/pkg/repositories/gormimpl/common.go
@@ -52,17 +52,14 @@ var entityToTableName = map[common.Entity]string{
}
var innerJoinExecToNodeExec = fmt.Sprintf(
- "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND "+
- "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name",
+ "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND %[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name",
executionTableName, nodeExecutionTableName)
var innerJoinExecToTaskExec = fmt.Sprintf(
- "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND "+
- "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name",
+ "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND %[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name",
executionTableName, taskExecutionTableName)
var innerJoinNodeExecToTaskExec = fmt.Sprintf(
- "INNER JOIN %[1]s ON %s.node_id = %[1]s.node_id AND %[2]s.execution_project = %[1]s.execution_project AND "+
- "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name",
+ "INNER JOIN %[1]s ON %s.node_id = %[1]s.node_id AND %[2]s.execution_project = %[1]s.execution_project AND %[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name",
nodeExecutionTableName, taskExecutionTableName)
// Because dynamic tasks do NOT necessarily register static task definitions, we use a left join to not exclude
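
The single-line join strings above rely on fmt's explicit argument indexes: %[1]s and %[2]s let each table name be passed once and reused, so collapsing the concatenated literals does not change the rendered SQL. A quick illustration:

```go
package main

import "fmt"

func main() {
	join := fmt.Sprintf(
		"INNER JOIN %[1]s ON %[2]s.execution_name = %[1]s.execution_name",
		"executions", "node_executions")
	fmt.Println(join)
	// INNER JOIN executions ON node_executions.execution_name = executions.execution_name
}
```
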
diff --git a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go
index b1772862dc..f39c6df554 100644
--- a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go
+++ b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go
@@ -36,11 +36,11 @@ func (r *NodeExecutionRepo) Get(ctx context.Context, input interfaces.NodeExecut
timer := r.metrics.GetDuration.Start()
tx := r.db.WithContext(ctx).Where(&models.NodeExecution{
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: input.NodeExecutionIdentifier.NodeId,
+ NodeID: input.NodeExecutionIdentifier.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: input.NodeExecutionIdentifier.ExecutionId.Project,
- Domain: input.NodeExecutionIdentifier.ExecutionId.Domain,
- Name: input.NodeExecutionIdentifier.ExecutionId.Name,
+ Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(),
+ Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(),
+ Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(),
},
},
}).Take(&nodeExecution)
@@ -49,11 +49,11 @@ func (r *NodeExecutionRepo) Get(ctx context.Context, input interfaces.NodeExecut
if tx.Error != nil && errors.Is(tx.Error, gorm.ErrRecordNotFound) {
return models.NodeExecution{},
adminErrors.GetMissingEntityError("node execution", &core.NodeExecutionIdentifier{
- NodeId: input.NodeExecutionIdentifier.NodeId,
+ NodeId: input.NodeExecutionIdentifier.GetNodeId(),
ExecutionId: &core.WorkflowExecutionIdentifier{
- Project: input.NodeExecutionIdentifier.ExecutionId.Project,
- Domain: input.NodeExecutionIdentifier.ExecutionId.Domain,
- Name: input.NodeExecutionIdentifier.ExecutionId.Name,
+ Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(),
+ Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(),
+ Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(),
},
})
} else if tx.Error != nil {
@@ -68,11 +68,11 @@ func (r *NodeExecutionRepo) GetWithChildren(ctx context.Context, input interface
timer := r.metrics.GetDuration.Start()
tx := r.db.WithContext(ctx).Where(&models.NodeExecution{
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: input.NodeExecutionIdentifier.NodeId,
+ NodeID: input.NodeExecutionIdentifier.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: input.NodeExecutionIdentifier.ExecutionId.Project,
- Domain: input.NodeExecutionIdentifier.ExecutionId.Domain,
- Name: input.NodeExecutionIdentifier.ExecutionId.Name,
+ Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(),
+ Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(),
+ Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(),
},
},
}).Preload("ChildNodeExecutions").Take(&nodeExecution)
@@ -81,11 +81,11 @@ func (r *NodeExecutionRepo) GetWithChildren(ctx context.Context, input interface
if tx.Error != nil && errors.Is(tx.Error, gorm.ErrRecordNotFound) {
return models.NodeExecution{},
adminErrors.GetMissingEntityError("node execution", &core.NodeExecutionIdentifier{
- NodeId: input.NodeExecutionIdentifier.NodeId,
+ NodeId: input.NodeExecutionIdentifier.GetNodeId(),
ExecutionId: &core.WorkflowExecutionIdentifier{
- Project: input.NodeExecutionIdentifier.ExecutionId.Project,
- Domain: input.NodeExecutionIdentifier.ExecutionId.Domain,
- Name: input.NodeExecutionIdentifier.ExecutionId.Name,
+ Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(),
+ Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(),
+ Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(),
},
})
} else if tx.Error != nil {
@@ -144,11 +144,11 @@ func (r *NodeExecutionRepo) Exists(ctx context.Context, input interfaces.NodeExe
timer := r.metrics.ExistsDuration.Start()
tx := r.db.WithContext(ctx).Select(ID).Where(&models.NodeExecution{
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: input.NodeExecutionIdentifier.NodeId,
+ NodeID: input.NodeExecutionIdentifier.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: input.NodeExecutionIdentifier.ExecutionId.Project,
- Domain: input.NodeExecutionIdentifier.ExecutionId.Domain,
- Name: input.NodeExecutionIdentifier.ExecutionId.Name,
+ Project: input.NodeExecutionIdentifier.GetExecutionId().GetProject(),
+ Domain: input.NodeExecutionIdentifier.GetExecutionId().GetDomain(),
+ Name: input.NodeExecutionIdentifier.GetExecutionId().GetName(),
},
},
}).Take(&nodeExecution)
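The repository and transformer hunks in this patch replace direct protobuf field access with the generated getters. protoc-gen-go emits getters that check for a nil receiver, so a chain such as GetExecutionId().GetProject() returns the zero value instead of panicking when an intermediate message is unset. A minimal hand-written analogue, assuming illustrative Outer/Inner types rather than generated code from this repo:

    package main

    import "fmt"

    // Inner and Outer mimic the shape of protoc-gen-go output: message structs
    // are held by pointer, and every getter tolerates a nil receiver.
    type Inner struct {
    	Project string
    }

    func (i *Inner) GetProject() string {
    	if i == nil {
    		return ""
    	}
    	return i.Project
    }

    type Outer struct {
    	ExecutionId *Inner
    }

    func (o *Outer) GetExecutionId() *Inner {
    	if o == nil {
    		return nil
    	}
    	return o.ExecutionId
    }

    func main() {
    	var req *Outer // nil message, as might arrive on a partially populated request

    	// Safe: each getter checks its receiver, so the chain yields "".
    	fmt.Printf("project: %q\n", req.GetExecutionId().GetProject())

    	// The unsafe equivalent, req.ExecutionId.Project, would dereference a
    	// nil pointer and panic.
    }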
diff --git a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go
index d4d30bef85..ba473c2968 100644
--- a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go
+++ b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go
@@ -37,17 +37,17 @@ func (r *TaskExecutionRepo) Get(ctx context.Context, input interfaces.GetTaskExe
tx := r.db.WithContext(ctx).Where(&models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: input.TaskExecutionID.TaskId.Project,
- Domain: input.TaskExecutionID.TaskId.Domain,
- Name: input.TaskExecutionID.TaskId.Name,
- Version: input.TaskExecutionID.TaskId.Version,
+ Project: input.TaskExecutionID.GetTaskId().GetProject(),
+ Domain: input.TaskExecutionID.GetTaskId().GetDomain(),
+ Name: input.TaskExecutionID.GetTaskId().GetName(),
+ Version: input.TaskExecutionID.GetTaskId().GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: input.TaskExecutionID.NodeExecutionId.NodeId,
+ NodeID: input.TaskExecutionID.GetNodeExecutionId().GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: input.TaskExecutionID.NodeExecutionId.ExecutionId.Project,
- Domain: input.TaskExecutionID.NodeExecutionId.ExecutionId.Domain,
- Name: input.TaskExecutionID.NodeExecutionId.ExecutionId.Name,
+ Project: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject(),
+ Domain: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain(),
+ Name: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetName(),
},
},
RetryAttempt: &input.TaskExecutionID.RetryAttempt,
@@ -59,17 +59,17 @@ func (r *TaskExecutionRepo) Get(ctx context.Context, input interfaces.GetTaskExe
return models.TaskExecution{},
flyteAdminDbErrors.GetMissingEntityError("task execution", &core.TaskExecutionIdentifier{
TaskId: &core.Identifier{
- Project: input.TaskExecutionID.TaskId.Project,
- Domain: input.TaskExecutionID.TaskId.Domain,
- Name: input.TaskExecutionID.TaskId.Name,
- Version: input.TaskExecutionID.TaskId.Version,
+ Project: input.TaskExecutionID.GetTaskId().GetProject(),
+ Domain: input.TaskExecutionID.GetTaskId().GetDomain(),
+ Name: input.TaskExecutionID.GetTaskId().GetName(),
+ Version: input.TaskExecutionID.GetTaskId().GetVersion(),
},
NodeExecutionId: &core.NodeExecutionIdentifier{
- NodeId: input.TaskExecutionID.NodeExecutionId.NodeId,
+ NodeId: input.TaskExecutionID.GetNodeExecutionId().GetNodeId(),
ExecutionId: &core.WorkflowExecutionIdentifier{
- Project: input.TaskExecutionID.NodeExecutionId.ExecutionId.Project,
- Domain: input.TaskExecutionID.NodeExecutionId.ExecutionId.Domain,
- Name: input.TaskExecutionID.NodeExecutionId.ExecutionId.Name,
+ Project: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject(),
+ Domain: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain(),
+ Name: input.TaskExecutionID.GetNodeExecutionId().GetExecutionId().GetName(),
},
},
})
diff --git a/flyteadmin/pkg/repositories/transformers/description_entity.go b/flyteadmin/pkg/repositories/transformers/description_entity.go
index d3a816179b..8e16ef0c6d 100644
--- a/flyteadmin/pkg/repositories/transformers/description_entity.go
+++ b/flyteadmin/pkg/repositories/transformers/description_entity.go
@@ -26,34 +26,34 @@ func CreateDescriptionEntityModel(
var sourceCode models.SourceCode
var err error
- if descriptionEntity.LongDescription != nil {
- longDescriptionBytes, err = proto.Marshal(descriptionEntity.LongDescription)
+ if descriptionEntity.GetLongDescription() != nil {
+ longDescriptionBytes, err = proto.Marshal(descriptionEntity.GetLongDescription())
if err != nil {
logger.Errorf(ctx, "Failed to marshal LongDescription with error: %v", err)
return nil, err
}
}
- if descriptionEntity.LongDescription != nil {
- longDescriptionBytes, err = proto.Marshal(descriptionEntity.LongDescription)
+ if descriptionEntity.GetLongDescription() != nil {
+ longDescriptionBytes, err = proto.Marshal(descriptionEntity.GetLongDescription())
if err != nil {
logger.Errorf(ctx, "Failed to marshal LongDescription with error: %v", err)
return nil, err
}
}
- if descriptionEntity.SourceCode != nil {
- sourceCode = models.SourceCode{Link: descriptionEntity.SourceCode.Link}
+ if descriptionEntity.GetSourceCode() != nil {
+ sourceCode = models.SourceCode{Link: descriptionEntity.GetSourceCode().GetLink()}
}
return &models.DescriptionEntity{
DescriptionEntityKey: models.DescriptionEntityKey{
- ResourceType: id.ResourceType,
- Project: id.Project,
- Domain: id.Domain,
- Name: id.Name,
- Version: id.Version,
+ ResourceType: id.GetResourceType(),
+ Project: id.GetProject(),
+ Domain: id.GetDomain(),
+ Name: id.GetName(),
+ Version: id.GetVersion(),
},
- ShortDescription: descriptionEntity.ShortDescription,
+ ShortDescription: descriptionEntity.GetShortDescription(),
LongDescription: longDescriptionBytes,
SourceCode: sourceCode,
}, nil
diff --git a/flyteadmin/pkg/repositories/transformers/description_entity_test.go b/flyteadmin/pkg/repositories/transformers/description_entity_test.go
index 9279ff0f65..b8feeb91b6 100644
--- a/flyteadmin/pkg/repositories/transformers/description_entity_test.go
+++ b/flyteadmin/pkg/repositories/transformers/description_entity_test.go
@@ -37,7 +37,7 @@ func TestToDescriptionEntityExecutionModel(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, shortDescription, model.ShortDescription)
assert.Equal(t, longDescriptionBytes, model.LongDescription)
- assert.Equal(t, sourceCode.Link, model.Link)
+ assert.Equal(t, sourceCode.GetLink(), model.Link)
}
func TestFromDescriptionEntityExecutionModel(t *testing.T) {
@@ -59,9 +59,9 @@ func TestFromDescriptionEntityExecutionModel(t *testing.T) {
SourceCode: models.SourceCode{Link: "https://github/flyte"},
})
assert.Nil(t, err)
- assert.Equal(t, descriptionEntity.ShortDescription, shortDescription)
- assert.Equal(t, descriptionEntity.LongDescription.IconLink, longDescription.IconLink)
- assert.Equal(t, descriptionEntity.SourceCode, sourceCode)
+ assert.Equal(t, descriptionEntity.GetShortDescription(), shortDescription)
+ assert.Equal(t, descriptionEntity.GetLongDescription().GetIconLink(), longDescription.GetIconLink())
+ assert.Equal(t, descriptionEntity.GetSourceCode(), sourceCode)
}
func TestFromDescriptionEntityExecutionModels(t *testing.T) {
@@ -85,7 +85,7 @@ func TestFromDescriptionEntityExecutionModels(t *testing.T) {
},
})
assert.Nil(t, err)
- assert.Equal(t, descriptionEntity[0].ShortDescription, shortDescription)
- assert.Equal(t, descriptionEntity[0].LongDescription.IconLink, longDescription.IconLink)
- assert.Equal(t, descriptionEntity[0].SourceCode, sourceCode)
+ assert.Equal(t, descriptionEntity[0].GetShortDescription(), shortDescription)
+ assert.Equal(t, descriptionEntity[0].GetLongDescription().GetIconLink(), longDescription.GetIconLink())
+ assert.Equal(t, descriptionEntity[0].GetSourceCode(), sourceCode)
}
diff --git a/flyteadmin/pkg/repositories/transformers/execution.go b/flyteadmin/pkg/repositories/transformers/execution.go
index 711f6bdddb..8943d2303b 100644
--- a/flyteadmin/pkg/repositories/transformers/execution.go
+++ b/flyteadmin/pkg/repositories/transformers/execution.go
@@ -61,7 +61,7 @@ var ListExecutionTransformerOptions = &ExecutionTransformerOptions{
// CreateExecutionModel transforms an ExecutionCreateRequest to an Execution model
func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, error) {
requestSpec := input.RequestSpec
- if requestSpec.Metadata == nil {
+ if requestSpec.GetMetadata() == nil {
requestSpec.Metadata = &admin.ExecutionMetadata{}
}
requestSpec.Metadata.SystemMetadata = &admin.SystemMetadata{
@@ -81,7 +81,7 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e
WorkflowId: input.WorkflowIdentifier,
StateChangeDetails: &admin.ExecutionStateChangeDetails{
State: admin.ExecutionState_EXECUTION_ACTIVE,
- Principal: requestSpec.Metadata.Principal,
+ Principal: requestSpec.GetMetadata().GetPrincipal(),
OccurredAt: createdAt,
},
}
@@ -114,12 +114,12 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e
executionModel := &models.Execution{
ExecutionKey: models.ExecutionKey{
- Project: input.WorkflowExecutionID.Project,
- Domain: input.WorkflowExecutionID.Domain,
- Name: input.WorkflowExecutionID.Name,
+ Project: input.WorkflowExecutionID.GetProject(),
+ Domain: input.WorkflowExecutionID.GetDomain(),
+ Name: input.WorkflowExecutionID.GetName(),
},
Spec: spec,
- Phase: closure.Phase.String(),
+ Phase: closure.GetPhase().String(),
Closure: closureBytes,
WorkflowID: input.WorkflowID,
ExecutionCreatedAt: &input.CreatedAt,
@@ -129,7 +129,7 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e
Cluster: input.Cluster,
InputsURI: input.InputsURI,
UserInputsURI: input.UserInputsURI,
- User: requestSpec.Metadata.Principal,
+ User: requestSpec.GetMetadata().GetPrincipal(),
State: &activeExecution,
LaunchEntity: strings.ToLower(input.LaunchEntity.String()),
}
@@ -140,8 +140,8 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e
} else {
executionModel.LaunchPlanID = input.LaunchPlanID
}
- if input.RequestSpec.Metadata != nil {
- executionModel.Mode = int32(input.RequestSpec.Metadata.Mode)
+ if input.RequestSpec.GetMetadata() != nil {
+ executionModel.Mode = int32(input.RequestSpec.GetMetadata().GetMode())
}
return executionModel, nil
@@ -151,13 +151,13 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e
func CreateExecutionTagModel(input CreateExecutionModelInput) ([]*models.ExecutionTag, error) {
tags := make([]*models.ExecutionTag, 0)
- if input.RequestSpec.Labels != nil {
- for k, v := range input.RequestSpec.Labels.Values {
+ if input.RequestSpec.GetLabels() != nil {
+ for k, v := range input.RequestSpec.GetLabels().GetValues() {
tags = append(tags, &models.ExecutionTag{
ExecutionKey: models.ExecutionKey{
- Project: input.WorkflowExecutionID.Project,
- Domain: input.WorkflowExecutionID.Domain,
- Name: input.WorkflowExecutionID.Name,
+ Project: input.WorkflowExecutionID.GetProject(),
+ Domain: input.WorkflowExecutionID.GetDomain(),
+ Name: input.WorkflowExecutionID.GetName(),
},
Key: k,
Value: v,
@@ -165,12 +165,12 @@ func CreateExecutionTagModel(input CreateExecutionModelInput) ([]*models.Executi
}
}
- for _, v := range input.RequestSpec.Tags {
+ for _, v := range input.RequestSpec.GetTags() {
tags = append(tags, &models.ExecutionTag{
ExecutionKey: models.ExecutionKey{
- Project: input.WorkflowExecutionID.Project,
- Domain: input.WorkflowExecutionID.Domain,
- Name: input.WorkflowExecutionID.Name,
+ Project: input.WorkflowExecutionID.GetProject(),
+ Domain: input.WorkflowExecutionID.GetDomain(),
+ Name: input.WorkflowExecutionID.GetName(),
},
Key: v,
Value: "",
@@ -189,10 +189,10 @@ func reassignCluster(ctx context.Context, cluster string, executionID *core.Work
if err != nil {
return flyteErrs.NewFlyteAdminErrorf(codes.Internal, "Failed to unmarshal execution spec: %v", err)
}
- if executionSpec.Metadata == nil {
+ if executionSpec.GetMetadata() == nil {
executionSpec.Metadata = &admin.ExecutionMetadata{}
}
- if executionSpec.Metadata.SystemMetadata == nil {
+ if executionSpec.GetMetadata().GetSystemMetadata() == nil {
executionSpec.Metadata.SystemMetadata = &admin.SystemMetadata{}
}
executionSpec.Metadata.SystemMetadata.ExecutionCluster = cluster
@@ -214,64 +214,64 @@ func UpdateExecutionModelState(
if err != nil {
return flyteErrs.NewFlyteAdminErrorf(codes.Internal, "Failed to unmarshal execution closure: %v", err)
}
- executionClosure.Phase = request.Event.Phase
- executionClosure.UpdatedAt = request.Event.OccurredAt
- execution.Phase = request.Event.Phase.String()
+ executionClosure.Phase = request.GetEvent().GetPhase()
+ executionClosure.UpdatedAt = request.GetEvent().GetOccurredAt()
+ execution.Phase = request.GetEvent().GetPhase().String()
- occurredAtTimestamp, err := ptypes.Timestamp(request.Event.OccurredAt)
+ occurredAtTimestamp, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt())
if err != nil {
return flyteErrs.NewFlyteAdminErrorf(codes.Internal, "Failed to parse OccurredAt: %v", err)
}
execution.ExecutionUpdatedAt = &occurredAtTimestamp
// only mark the execution started when we get the initial running event
- if request.Event.Phase == core.WorkflowExecution_RUNNING {
+ if request.GetEvent().GetPhase() == core.WorkflowExecution_RUNNING {
execution.StartedAt = &occurredAtTimestamp
- executionClosure.StartedAt = request.Event.OccurredAt
- } else if common.IsExecutionTerminal(request.Event.Phase) {
+ executionClosure.StartedAt = request.GetEvent().GetOccurredAt()
+ } else if common.IsExecutionTerminal(request.GetEvent().GetPhase()) {
if execution.StartedAt != nil {
execution.Duration = occurredAtTimestamp.Sub(*execution.StartedAt)
executionClosure.Duration = ptypes.DurationProto(execution.Duration)
} else {
logger.Infof(context.Background(),
- "Cannot compute duration because startedAt was never set, requestId: %v", request.RequestId)
+ "Cannot compute duration because startedAt was never set, requestId: %v", request.GetRequestId())
}
}
// Default or empty cluster values do not require updating the execution model.
- ignoreClusterFromEvent := len(request.Event.ProducerId) == 0 || request.Event.ProducerId == common.DefaultProducerID
- logger.Debugf(ctx, "Producer Id [%v]. IgnoreClusterFromEvent [%v]", request.Event.ProducerId, ignoreClusterFromEvent)
+ ignoreClusterFromEvent := len(request.GetEvent().GetProducerId()) == 0 || request.GetEvent().GetProducerId() == common.DefaultProducerID
+ logger.Debugf(ctx, "Producer Id [%v]. IgnoreClusterFromEvent [%v]", request.GetEvent().GetProducerId(), ignoreClusterFromEvent)
if !ignoreClusterFromEvent {
if clusterReassignablePhases.Has(execution.Phase) {
- if err := reassignCluster(ctx, request.Event.ProducerId, request.Event.ExecutionId, execution); err != nil {
+ if err := reassignCluster(ctx, request.GetEvent().GetProducerId(), request.GetEvent().GetExecutionId(), execution); err != nil {
return err
}
- } else if execution.Cluster != request.Event.ProducerId {
+ } else if execution.Cluster != request.GetEvent().GetProducerId() {
errorMsg := fmt.Sprintf("Cannot accept events for running/terminated execution [%v] from cluster [%s],"+
"expected events to originate from [%s]",
- request.Event.ExecutionId, request.Event.ProducerId, execution.Cluster)
+ request.GetEvent().GetExecutionId(), request.GetEvent().GetProducerId(), execution.Cluster)
return flyteErrs.NewIncompatibleClusterError(ctx, errorMsg, execution.Cluster)
}
}
- if request.Event.GetOutputUri() != "" {
+ if request.GetEvent().GetOutputUri() != "" {
executionClosure.OutputResult = &admin.ExecutionClosure_Outputs{
Outputs: &admin.LiteralMapBlob{
Data: &admin.LiteralMapBlob_Uri{
- Uri: request.Event.GetOutputUri(),
+ Uri: request.GetEvent().GetOutputUri(),
},
},
}
- } else if request.Event.GetOutputData() != nil {
+ } else if request.GetEvent().GetOutputData() != nil {
switch inlineEventDataPolicy {
case interfaces.InlineEventDataPolicyStoreInline:
executionClosure.OutputResult = &admin.ExecutionClosure_OutputData{
- OutputData: request.Event.GetOutputData(),
+ OutputData: request.GetEvent().GetOutputData(),
}
default:
logger.Debugf(ctx, "Offloading outputs per InlineEventDataPolicy")
- uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetOutputData(),
- request.Event.ExecutionId.Project, request.Event.ExecutionId.Domain, request.Event.ExecutionId.Name, OutputsObjectSuffix)
+ uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetOutputData(),
+ request.GetEvent().GetExecutionId().GetProject(), request.GetEvent().GetExecutionId().GetDomain(), request.GetEvent().GetExecutionId().GetName(), OutputsObjectSuffix)
if err != nil {
return err
}
@@ -283,11 +283,11 @@ func UpdateExecutionModelState(
},
}
}
- } else if request.Event.GetError() != nil {
+ } else if request.GetEvent().GetError() != nil {
executionClosure.OutputResult = &admin.ExecutionClosure_Error{
- Error: request.Event.GetError(),
+ Error: request.GetEvent().GetError(),
}
- k := request.Event.GetError().Kind.String()
+ k := request.GetEvent().GetError().GetKind().String()
execution.ErrorKind = &k
execution.ErrorCode = &request.Event.GetError().Code
}
@@ -372,13 +372,13 @@ func FromExecutionModel(ctx context.Context, executionModel models.Execution, op
return nil, flyteErrs.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal spec")
}
if len(opts.DefaultNamespace) > 0 {
- if spec.Metadata == nil {
+ if spec.GetMetadata() == nil {
spec.Metadata = &admin.ExecutionMetadata{}
}
- if spec.Metadata.SystemMetadata == nil {
+ if spec.GetMetadata().GetSystemMetadata() == nil {
spec.Metadata.SystemMetadata = &admin.SystemMetadata{}
}
- if len(spec.GetMetadata().GetSystemMetadata().Namespace) == 0 {
+ if len(spec.GetMetadata().GetSystemMetadata().GetNamespace()) == 0 {
logger.Infof(ctx, "setting execution system metadata namespace to [%s]", opts.DefaultNamespace)
spec.Metadata.SystemMetadata.Namespace = opts.DefaultNamespace
}
@@ -388,7 +388,7 @@ func FromExecutionModel(ctx context.Context, executionModel models.Execution, op
if err = proto.Unmarshal(executionModel.Closure, &closure); err != nil {
return nil, flyteErrs.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure")
}
- if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 {
+ if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().GetMessage()) > 0 {
trimmedErrOutputResult := closure.GetError()
trimmedErrMessage := TrimErrorMessage(trimmedErrOutputResult.GetMessage())
trimmedErrOutputResult.Message = trimmedErrMessage
@@ -397,7 +397,7 @@ func FromExecutionModel(ctx context.Context, executionModel models.Execution, op
}
}
- if closure.StateChangeDetails == nil {
+ if closure.GetStateChangeDetails() == nil {
// Update execution state details from model for older executions
if closure.StateChangeDetails, err = PopulateDefaultStateChangeDetails(executionModel); err != nil {
return nil, err
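A few assignments in the hunks above, such as execution.ErrorCode = &request.Event.GetError().Code, keep the direct field access even after the getter sweep, presumably because Go does not allow taking the address of a function call result; only addressable expressions such as struct fields can sit behind &. A small sketch of that constraint, with an illustrative Error type:

    package main

    import "fmt"

    type Error struct {
    	Code string
    }

    func (e *Error) GetCode() string {
    	if e == nil {
    		return ""
    	}
    	return e.Code
    }

    func main() {
    	err := &Error{Code: "USER:BadInput"}

    	// Addressable: Code is a field reachable through a pointer, so & is legal.
    	codePtr := &err.Code
    	fmt.Println(*codePtr)

    	// Not addressable: the result of a function call cannot have its address
    	// taken, so the line below would fail to compile.
    	// codePtr = &err.GetCode()
    }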
diff --git a/flyteadmin/pkg/repositories/transformers/execution_event.go b/flyteadmin/pkg/repositories/transformers/execution_event.go
index 34f3c4d84a..f603af44e6 100644
--- a/flyteadmin/pkg/repositories/transformers/execution_event.go
+++ b/flyteadmin/pkg/repositories/transformers/execution_event.go
@@ -11,18 +11,18 @@ import (
// Transforms an ExecutionEventCreateRequest to an ExecutionEvent model
func CreateExecutionEventModel(request *admin.WorkflowExecutionEventRequest) (*models.ExecutionEvent, error) {
- occurredAt, err := ptypes.Timestamp(request.Event.OccurredAt)
+ occurredAt, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to marshal occurred at timestamp")
}
return &models.ExecutionEvent{
ExecutionKey: models.ExecutionKey{
- Project: request.Event.ExecutionId.Project,
- Domain: request.Event.ExecutionId.Domain,
- Name: request.Event.ExecutionId.Name,
+ Project: request.GetEvent().GetExecutionId().GetProject(),
+ Domain: request.GetEvent().GetExecutionId().GetDomain(),
+ Name: request.GetEvent().GetExecutionId().GetName(),
},
- RequestID: request.RequestId,
+ RequestID: request.GetRequestId(),
OccurredAt: occurredAt,
- Phase: request.Event.Phase.String(),
+ Phase: request.GetEvent().GetPhase().String(),
}, nil
}
diff --git a/flyteadmin/pkg/repositories/transformers/execution_test.go b/flyteadmin/pkg/repositories/transformers/execution_test.go
index c7b9f33e95..5ea50cefe4 100644
--- a/flyteadmin/pkg/repositories/transformers/execution_test.go
+++ b/flyteadmin/pkg/repositories/transformers/execution_test.go
@@ -78,7 +78,7 @@ func TestCreateExecutionModel(t *testing.T) {
Domain: "domain",
Name: "name",
},
- RequestSpec: execRequest.Spec,
+ RequestSpec: execRequest.GetSpec(),
LaunchPlanID: lpID,
WorkflowID: wfID,
CreatedAt: createdAt,
@@ -103,7 +103,7 @@ func TestCreateExecutionModel(t *testing.T) {
assert.Equal(t, sourceID, execution.SourceExecutionID)
assert.Equal(t, "launch_plan", execution.LaunchEntity)
assert.Equal(t, execution.Phase, core.WorkflowExecution_UNDEFINED.String())
- expectedSpec := execRequest.Spec
+ expectedSpec := execRequest.GetSpec()
expectedSpec.Metadata.Principal = principal
expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{
ExecutionCluster: cluster,
@@ -136,7 +136,7 @@ func TestCreateExecutionModel(t *testing.T) {
Domain: "domain",
Name: "name",
},
- RequestSpec: execRequest.Spec,
+ RequestSpec: execRequest.GetSpec(),
LaunchPlanID: lpID,
WorkflowID: wfID,
CreatedAt: createdAt,
@@ -162,7 +162,7 @@ func TestCreateExecutionModel(t *testing.T) {
assert.Equal(t, sourceID, execution.SourceExecutionID)
assert.Equal(t, "launch_plan", execution.LaunchEntity)
assert.Equal(t, core.WorkflowExecution_FAILED.String(), execution.Phase)
- expectedSpec := execRequest.Spec
+ expectedSpec := execRequest.GetSpec()
expectedSpec.Metadata.Principal = principal
expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{
ExecutionCluster: cluster,
@@ -202,7 +202,7 @@ func TestCreateExecutionModel(t *testing.T) {
Domain: "domain",
Name: "name",
},
- RequestSpec: execRequest.Spec,
+ RequestSpec: execRequest.GetSpec(),
LaunchPlanID: lpID,
WorkflowID: wfID,
CreatedAt: createdAt,
@@ -228,7 +228,7 @@ func TestCreateExecutionModel(t *testing.T) {
assert.Equal(t, sourceID, execution.SourceExecutionID)
assert.Equal(t, "launch_plan", execution.LaunchEntity)
assert.Equal(t, core.WorkflowExecution_FAILED.String(), execution.Phase)
- expectedSpec := execRequest.Spec
+ expectedSpec := execRequest.GetSpec()
expectedSpec.Metadata.Principal = principal
expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{
ExecutionCluster: cluster,
@@ -268,7 +268,7 @@ func TestCreateExecutionModel(t *testing.T) {
Domain: "domain",
Name: "name",
},
- RequestSpec: execRequest.Spec,
+ RequestSpec: execRequest.GetSpec(),
LaunchPlanID: lpID,
WorkflowID: wfID,
CreatedAt: createdAt,
@@ -294,7 +294,7 @@ func TestCreateExecutionModel(t *testing.T) {
assert.Equal(t, sourceID, execution.SourceExecutionID)
assert.Equal(t, "launch_plan", execution.LaunchEntity)
assert.Equal(t, core.WorkflowExecution_FAILED.String(), execution.Phase)
- expectedSpec := execRequest.Spec
+ expectedSpec := execRequest.GetSpec()
expectedSpec.Metadata.Principal = principal
expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{
ExecutionCluster: cluster,
@@ -341,7 +341,7 @@ func TestUpdateModelState_UnknownToRunning(t *testing.T) {
Phase: core.WorkflowExecution_UNDEFINED,
CreatedAt: createdAtProto,
}
- spec := testutils.GetExecutionRequest().Spec
+ spec := testutils.GetExecutionRequest().GetSpec()
specBytes, _ := proto.Marshal(spec)
existingClosureBytes, _ := proto.Marshal(&existingClosure)
startedAt := time.Now()
@@ -401,7 +401,7 @@ func TestUpdateModelState_RunningToFailed(t *testing.T) {
}
ec := "foo"
ek := core.ExecutionError_SYSTEM
- spec := testutils.GetExecutionRequest().Spec
+ spec := testutils.GetExecutionRequest().GetSpec()
specBytes, _ := proto.Marshal(spec)
existingClosureBytes, _ := proto.Marshal(&existingClosure)
executionModel := getRunningExecutionModel(specBytes, existingClosureBytes, startedAt)
@@ -474,7 +474,7 @@ func TestUpdateModelState_RunningToSuccess(t *testing.T) {
Phase: core.WorkflowExecution_RUNNING,
StartedAt: startedAtProto,
}
- spec := testutils.GetExecutionRequest().Spec
+ spec := testutils.GetExecutionRequest().GetSpec()
specBytes, _ := proto.Marshal(spec)
existingClosureBytes, _ := proto.Marshal(&existingClosure)
executionModel := getRunningExecutionModel(specBytes, existingClosureBytes, startedAt)
@@ -692,7 +692,7 @@ func TestGetExecutionIdentifier(t *testing.T) {
}
func TestFromExecutionModel(t *testing.T) {
- spec := testutils.GetExecutionRequest().Spec
+ spec := testutils.GetExecutionRequest().GetSpec()
specBytes, _ := proto.Marshal(spec)
phase := core.WorkflowExecution_RUNNING.String()
startedAt := time.Date(2018, 8, 30, 0, 0, 0, 0, time.UTC)
@@ -700,7 +700,7 @@ func TestFromExecutionModel(t *testing.T) {
startedAtProto, _ := ptypes.TimestampProto(startedAt)
createdAtProto, _ := ptypes.TimestampProto(createdAt)
closure := admin.ExecutionClosure{
- ComputedInputs: spec.Inputs,
+ ComputedInputs: spec.GetInputs(),
Phase: core.WorkflowExecution_RUNNING,
StartedAt: startedAtProto,
StateChangeDetails: &admin.ExecutionStateChangeDetails{
@@ -758,15 +758,15 @@ func TestFromExecutionModel_Aborted(t *testing.T) {
}
execution, err := FromExecutionModel(context.TODO(), executionModel, DefaultExecutionTransformerOptions)
assert.Nil(t, err)
- assert.Equal(t, core.WorkflowExecution_ABORTED, execution.Closure.Phase)
+ assert.Equal(t, core.WorkflowExecution_ABORTED, execution.GetClosure().GetPhase())
assert.True(t, proto.Equal(&admin.AbortMetadata{
Cause: abortCause,
- }, execution.Closure.GetAbortMetadata()))
+ }, execution.GetClosure().GetAbortMetadata()))
executionModel.Phase = core.WorkflowExecution_RUNNING.String()
execution, err = FromExecutionModel(context.TODO(), executionModel, DefaultExecutionTransformerOptions)
assert.Nil(t, err)
- assert.Empty(t, execution.Closure.GetAbortCause())
+ assert.Empty(t, execution.GetClosure().GetAbortCause())
}
func TestFromExecutionModel_Error(t *testing.T) {
@@ -795,8 +795,8 @@ func TestFromExecutionModel_Error(t *testing.T) {
expectedExecErr := execErr
expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen))
assert.Nil(t, err)
- assert.Equal(t, core.WorkflowExecution_FAILED, execution.Closure.Phase)
- assert.True(t, proto.Equal(expectedExecErr, execution.Closure.GetError()))
+ assert.Equal(t, core.WorkflowExecution_FAILED, execution.GetClosure().GetPhase())
+ assert.True(t, proto.Equal(expectedExecErr, execution.GetClosure().GetError()))
}
func TestFromExecutionModel_ValidUTF8TrimmedErrorMsg(t *testing.T) {
@@ -843,11 +843,11 @@ func TestFromExecutionModel_OverwriteNamespace(t *testing.T) {
DefaultNamespace: overwrittenNamespace,
})
assert.NoError(t, err)
- assert.Equal(t, execution.GetSpec().GetMetadata().GetSystemMetadata().Namespace, overwrittenNamespace)
+ assert.Equal(t, execution.GetSpec().GetMetadata().GetSystemMetadata().GetNamespace(), overwrittenNamespace)
}
func TestFromExecutionModels(t *testing.T) {
- spec := testutils.GetExecutionRequest().Spec
+ spec := testutils.GetExecutionRequest().GetSpec()
specBytes, _ := proto.Marshal(spec)
phase := core.WorkflowExecution_SUCCEEDED.String()
startedAt := time.Date(2018, 8, 30, 0, 0, 0, 0, time.UTC)
@@ -857,7 +857,7 @@ func TestFromExecutionModels(t *testing.T) {
duration := 2 * time.Minute
durationProto := ptypes.DurationProto(duration)
closure := admin.ExecutionClosure{
- ComputedInputs: spec.Inputs,
+ ComputedInputs: spec.GetInputs(),
Phase: core.WorkflowExecution_RUNNING,
StartedAt: startedAtProto,
Duration: durationProto,
@@ -914,7 +914,7 @@ func TestUpdateModelState_WithClusterInformation(t *testing.T) {
Phase: core.WorkflowExecution_UNDEFINED,
CreatedAt: createdAtProto,
}
- spec := testutils.GetExecutionRequest().Spec
+ spec := testutils.GetExecutionRequest().GetSpec()
specBytes, _ := proto.Marshal(spec)
existingClosureBytes, _ := proto.Marshal(&existingClosure)
startedAt := time.Now()
@@ -982,7 +982,7 @@ func TestReassignCluster(t *testing.T) {
}
t.Run("happy case", func(t *testing.T) {
- spec := testutils.GetExecutionRequest().Spec
+ spec := testutils.GetExecutionRequest().GetSpec()
spec.Metadata = &admin.ExecutionMetadata{
SystemMetadata: &admin.SystemMetadata{
ExecutionCluster: oldCluster,
@@ -1000,10 +1000,10 @@ func TestReassignCluster(t *testing.T) {
var updatedSpec admin.ExecutionSpec
err = proto.Unmarshal(executionModel.Spec, &updatedSpec)
assert.NoError(t, err)
- assert.Equal(t, newCluster, updatedSpec.Metadata.SystemMetadata.ExecutionCluster)
+ assert.Equal(t, newCluster, updatedSpec.GetMetadata().GetSystemMetadata().GetExecutionCluster())
})
t.Run("happy case - initialize cluster", func(t *testing.T) {
- spec := testutils.GetExecutionRequest().Spec
+ spec := testutils.GetExecutionRequest().GetSpec()
specBytes, _ := proto.Marshal(spec)
executionModel := models.Execution{
Spec: specBytes,
@@ -1015,7 +1015,7 @@ func TestReassignCluster(t *testing.T) {
var updatedSpec admin.ExecutionSpec
err = proto.Unmarshal(executionModel.Spec, &updatedSpec)
assert.NoError(t, err)
- assert.Equal(t, newCluster, updatedSpec.Metadata.SystemMetadata.ExecutionCluster)
+ assert.Equal(t, newCluster, updatedSpec.GetMetadata().GetSystemMetadata().GetExecutionCluster())
})
t.Run("invalid existing spec", func(t *testing.T) {
executionModel := models.Execution{
@@ -1040,9 +1040,9 @@ func TestGetExecutionStateFromModel(t *testing.T) {
executionStatus, err := PopulateDefaultStateChangeDetails(executionModel)
assert.Nil(t, err)
assert.NotNil(t, executionStatus)
- assert.Equal(t, admin.ExecutionState_EXECUTION_ACTIVE, executionStatus.State)
- assert.NotNil(t, executionStatus.OccurredAt)
- assert.Equal(t, createdAtProto, executionStatus.OccurredAt)
+ assert.Equal(t, admin.ExecutionState_EXECUTION_ACTIVE, executionStatus.GetState())
+ assert.NotNil(t, executionStatus.GetOccurredAt())
+ assert.Equal(t, createdAtProto, executionStatus.GetOccurredAt())
})
t.Run("incorrect created at", func(t *testing.T) {
createdAt := time.Unix(math.MinInt64, math.MinInt32).UTC()
@@ -1072,10 +1072,10 @@ func TestUpdateExecutionModelStateChangeDetails(t *testing.T) {
err = proto.Unmarshal(execModel.Closure, closure)
assert.Nil(t, err)
assert.NotNil(t, closure)
- assert.NotNil(t, closure.StateChangeDetails)
- assert.Equal(t, admin.ExecutionState_EXECUTION_ARCHIVED, closure.StateChangeDetails.State)
- assert.Equal(t, "dummyUser", closure.StateChangeDetails.Principal)
- assert.Equal(t, statetUpdateAtProto, closure.StateChangeDetails.OccurredAt)
+ assert.NotNil(t, closure.GetStateChangeDetails())
+ assert.Equal(t, admin.ExecutionState_EXECUTION_ARCHIVED, closure.GetStateChangeDetails().GetState())
+ assert.Equal(t, "dummyUser", closure.GetStateChangeDetails().GetPrincipal())
+ assert.Equal(t, statetUpdateAtProto, closure.GetStateChangeDetails().GetOccurredAt())
})
t.Run("bad closure", func(t *testing.T) {
diff --git a/flyteadmin/pkg/repositories/transformers/launch_plan.go b/flyteadmin/pkg/repositories/transformers/launch_plan.go
index a7b33736d1..acfa14282a 100644
--- a/flyteadmin/pkg/repositories/transformers/launch_plan.go
+++ b/flyteadmin/pkg/repositories/transformers/launch_plan.go
@@ -16,10 +16,10 @@ func CreateLaunchPlan(
expectedOutputs *core.VariableMap) *admin.LaunchPlan {
return &admin.LaunchPlan{
- Id: request.Id,
- Spec: request.Spec,
+ Id: request.GetId(),
+ Spec: request.GetSpec(),
Closure: &admin.LaunchPlanClosure{
- ExpectedInputs: request.Spec.DefaultInputs,
+ ExpectedInputs: request.GetSpec().GetDefaultInputs(),
ExpectedOutputs: expectedOutputs,
},
}
@@ -31,22 +31,22 @@ func CreateLaunchPlanModel(
workflowRepoID uint,
digest []byte,
initState admin.LaunchPlanState) (models.LaunchPlan, error) {
- spec, err := proto.Marshal(launchPlan.Spec)
+ spec, err := proto.Marshal(launchPlan.GetSpec())
if err != nil {
return models.LaunchPlan{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize launch plan spec")
}
- closure, err := proto.Marshal(launchPlan.Closure)
+ closure, err := proto.Marshal(launchPlan.GetClosure())
if err != nil {
return models.LaunchPlan{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize launch plan closure")
}
var launchConditionType models.LaunchConditionType
scheduleType := models.LaunchPlanScheduleTypeNONE
- if launchPlan.Spec.EntityMetadata != nil && launchPlan.Spec.EntityMetadata.Schedule != nil {
- if launchPlan.Spec.EntityMetadata.Schedule.GetCronExpression() != "" || launchPlan.Spec.EntityMetadata.Schedule.GetCronSchedule() != nil {
+ if launchPlan.GetSpec().GetEntityMetadata() != nil && launchPlan.GetSpec().GetEntityMetadata().GetSchedule() != nil {
+ if launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetCronExpression() != "" || launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetCronSchedule() != nil {
scheduleType = models.LaunchPlanScheduleTypeCRON
launchConditionType = models.LaunchConditionTypeSCHED
- } else if launchPlan.Spec.EntityMetadata.Schedule.GetRate() != nil {
+ } else if launchPlan.GetSpec().GetEntityMetadata().GetSchedule().GetRate() != nil {
scheduleType = models.LaunchPlanScheduleTypeRATE
launchConditionType = models.LaunchConditionTypeSCHED
}
@@ -56,10 +56,10 @@ func CreateLaunchPlanModel(
lpModel := models.LaunchPlan{
LaunchPlanKey: models.LaunchPlanKey{
- Project: launchPlan.Id.Project,
- Domain: launchPlan.Id.Domain,
- Name: launchPlan.Id.Name,
- Version: launchPlan.Id.Version,
+ Project: launchPlan.GetId().GetProject(),
+ Domain: launchPlan.GetId().GetDomain(),
+ Name: launchPlan.GetId().GetName(),
+ Version: launchPlan.GetId().GetVersion(),
},
Spec: spec,
State: &state,
diff --git a/flyteadmin/pkg/repositories/transformers/launch_plan_test.go b/flyteadmin/pkg/repositories/transformers/launch_plan_test.go
index 65846de42b..c4551f89e5 100644
--- a/flyteadmin/pkg/repositories/transformers/launch_plan_test.go
+++ b/flyteadmin/pkg/repositories/transformers/launch_plan_test.go
@@ -39,8 +39,8 @@ func TestCreateLaunchPlan(t *testing.T) {
launchPlan := CreateLaunchPlan(request, expectedOutputs)
assert.True(t, proto.Equal(
&admin.LaunchPlan{
- Id: request.Id,
- Spec: request.Spec,
+ Id: request.GetId(),
+ Spec: request.GetSpec(),
Closure: &admin.LaunchPlanClosure{
ExpectedInputs: expectedInputs,
ExpectedOutputs: expectedOutputs,
@@ -54,8 +54,8 @@ func TestToLaunchPlanModel(t *testing.T) {
launchPlanDigest := []byte("launch plan")
launchPlan := &admin.LaunchPlan{
- Id: lpRequest.Id,
- Spec: lpRequest.Spec,
+ Id: lpRequest.GetId(),
+ Spec: lpRequest.GetSpec(),
Closure: &admin.LaunchPlanClosure{
ExpectedInputs: expectedInputs,
ExpectedOutputs: expectedOutputs,
@@ -70,11 +70,11 @@ func TestToLaunchPlanModel(t *testing.T) {
assert.Equal(t, "version", launchPlanModel.Version)
assert.Equal(t, workflowID, launchPlanModel.WorkflowID)
- expectedSpec, _ := proto.Marshal(lpRequest.Spec)
+ expectedSpec, _ := proto.Marshal(lpRequest.GetSpec())
assert.Equal(t, expectedSpec, launchPlanModel.Spec)
assert.Equal(t, models.LaunchPlanScheduleTypeNONE, launchPlanModel.ScheduleType)
- expectedClosure := launchPlan.Closure
+ expectedClosure := launchPlan.GetClosure()
var actualClosure admin.LaunchPlanClosure
err = proto.Unmarshal(launchPlanModel.Closure, &actualClosure)
@@ -101,8 +101,8 @@ func testLaunchPlanWithCronInternal(t *testing.T, lpRequest *admin.LaunchPlanCre
launchPlanDigest := []byte("launch plan")
launchPlan := &admin.LaunchPlan{
- Id: lpRequest.Id,
- Spec: lpRequest.Spec,
+ Id: lpRequest.GetId(),
+ Spec: lpRequest.GetSpec(),
Closure: &admin.LaunchPlanClosure{
ExpectedInputs: expectedInputs,
ExpectedOutputs: expectedOutputs,
@@ -117,11 +117,11 @@ func testLaunchPlanWithCronInternal(t *testing.T, lpRequest *admin.LaunchPlanCre
assert.Equal(t, "version", launchPlanModel.Version)
assert.Equal(t, workflowID, launchPlanModel.WorkflowID)
- expectedSpec, _ := proto.Marshal(lpRequest.Spec)
+ expectedSpec, _ := proto.Marshal(lpRequest.GetSpec())
assert.Equal(t, expectedSpec, launchPlanModel.Spec)
assert.Equal(t, models.LaunchPlanScheduleTypeCRON, launchPlanModel.ScheduleType)
- expectedClosure := launchPlan.Closure
+ expectedClosure := launchPlan.GetClosure()
var actualClosure admin.LaunchPlanClosure
err = proto.Unmarshal(launchPlanModel.Closure, &actualClosure)
@@ -137,8 +137,8 @@ func TestToLaunchPlanModelWithFixedRateSchedule(t *testing.T) {
launchPlanDigest := []byte("launch plan")
launchPlan := &admin.LaunchPlan{
- Id: lpRequest.Id,
- Spec: lpRequest.Spec,
+ Id: lpRequest.GetId(),
+ Spec: lpRequest.GetSpec(),
Closure: &admin.LaunchPlanClosure{
ExpectedInputs: expectedInputs,
ExpectedOutputs: expectedOutputs,
@@ -153,11 +153,11 @@ func TestToLaunchPlanModelWithFixedRateSchedule(t *testing.T) {
assert.Equal(t, "version", launchPlanModel.Version)
assert.Equal(t, workflowID, launchPlanModel.WorkflowID)
- expectedSpec, _ := proto.Marshal(lpRequest.Spec)
+ expectedSpec, _ := proto.Marshal(lpRequest.GetSpec())
assert.Equal(t, expectedSpec, launchPlanModel.Spec)
assert.Equal(t, models.LaunchPlanScheduleTypeRATE, launchPlanModel.ScheduleType)
- expectedClosure := launchPlan.Closure
+ expectedClosure := launchPlan.GetClosure()
var actualClosure admin.LaunchPlanClosure
err = proto.Unmarshal(launchPlanModel.Closure, &actualClosure)
@@ -174,13 +174,13 @@ func TestFromLaunchPlanModel(t *testing.T) {
updatedAt := createdAt.Add(time.Minute)
updatedAtProto, _ := ptypes.TimestampProto(updatedAt)
closure := admin.LaunchPlanClosure{
- ExpectedInputs: lpRequest.Spec.DefaultInputs,
- ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs,
+ ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(),
+ ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(),
CreatedAt: createdAtProto,
UpdatedAt: updatedAtProto,
State: admin.LaunchPlanState_ACTIVE,
}
- specBytes, _ := proto.Marshal(lpRequest.Spec)
+ specBytes, _ := proto.Marshal(lpRequest.GetSpec())
closureBytes, _ := proto.Marshal(&closure)
model := models.LaunchPlan{
@@ -206,9 +206,9 @@ func TestFromLaunchPlanModel(t *testing.T) {
Domain: "domain",
Name: "name",
Version: "version",
- }, lp.Id))
- assert.True(t, proto.Equal(&closure, lp.Closure))
- assert.True(t, proto.Equal(lpRequest.Spec, lp.Spec))
+ }, lp.GetId()))
+ assert.True(t, proto.Equal(&closure, lp.GetClosure()))
+ assert.True(t, proto.Equal(lpRequest.GetSpec(), lp.GetSpec()))
}
func TestFromLaunchPlanModels(t *testing.T) {
@@ -220,13 +220,13 @@ func TestFromLaunchPlanModels(t *testing.T) {
updatedAt := createdAt.Add(time.Minute)
updatedAtProto, _ := ptypes.TimestampProto(updatedAt)
closure := admin.LaunchPlanClosure{
- ExpectedInputs: lpRequest.Spec.DefaultInputs,
- ExpectedOutputs: workflowRequest.Spec.Template.Interface.Outputs,
+ ExpectedInputs: lpRequest.GetSpec().GetDefaultInputs(),
+ ExpectedOutputs: workflowRequest.GetSpec().GetTemplate().GetInterface().GetOutputs(),
CreatedAt: createdAtProto,
UpdatedAt: updatedAtProto,
State: admin.LaunchPlanState_ACTIVE,
}
- specBytes, _ := proto.Marshal(lpRequest.Spec)
+ specBytes, _ := proto.Marshal(lpRequest.GetSpec())
closureBytes, _ := proto.Marshal(&closure)
m1 := models.LaunchPlan{
@@ -272,7 +272,7 @@ func TestFromLaunchPlanModels(t *testing.T) {
Domain: "staging",
Name: "othername",
Version: "versionsecond",
- }, lp[1].Id))
- assert.True(t, proto.Equal(&closure, lp[1].Closure))
- assert.True(t, proto.Equal(lpRequest.Spec, lp[1].Spec))
+ }, lp[1].GetId()))
+ assert.True(t, proto.Equal(&closure, lp[1].GetClosure()))
+ assert.True(t, proto.Equal(lpRequest.GetSpec(), lp[1].GetSpec()))
}
diff --git a/flyteadmin/pkg/repositories/transformers/named_entity.go b/flyteadmin/pkg/repositories/transformers/named_entity.go
index 14c5818786..646d673923 100644
--- a/flyteadmin/pkg/repositories/transformers/named_entity.go
+++ b/flyteadmin/pkg/repositories/transformers/named_entity.go
@@ -6,16 +6,16 @@ import (
)
func CreateNamedEntityModel(request *admin.NamedEntityUpdateRequest) models.NamedEntity {
- stateInt := int32(request.Metadata.State)
+ stateInt := int32(request.GetMetadata().GetState())
return models.NamedEntity{
NamedEntityKey: models.NamedEntityKey{
- ResourceType: request.ResourceType,
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
+ ResourceType: request.GetResourceType(),
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
},
NamedEntityMetadataFields: models.NamedEntityMetadataFields{
- Description: request.Metadata.Description,
+ Description: request.GetMetadata().GetDescription(),
State: &stateInt,
},
}
diff --git a/flyteadmin/pkg/repositories/transformers/node_execution.go b/flyteadmin/pkg/repositories/transformers/node_execution.go
index 817f53290a..107e9efb70 100644
--- a/flyteadmin/pkg/repositories/transformers/node_execution.go
+++ b/flyteadmin/pkg/repositories/transformers/node_execution.go
@@ -30,7 +30,7 @@ type ToNodeExecutionModelInput struct {
func addNodeRunningState(request *admin.NodeExecutionEventRequest, nodeExecutionModel *models.NodeExecution,
closure *admin.NodeExecutionClosure) error {
- occurredAt, err := ptypes.Timestamp(request.Event.OccurredAt)
+ occurredAt, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt())
if err != nil {
return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal occurredAt with error: %v", err)
}
@@ -49,10 +49,10 @@ func addTerminalState(
ctx context.Context,
request *admin.NodeExecutionEventRequest, nodeExecutionModel *models.NodeExecution,
closure *admin.NodeExecutionClosure, inlineEventDataPolicy interfaces.InlineEventDataPolicy, storageClient *storage.DataStore) error {
- if closure.StartedAt == nil {
+ if closure.GetStartedAt() == nil {
logger.Warning(context.Background(), "node execution is missing StartedAt")
} else {
- endTime, err := ptypes.Timestamp(request.Event.OccurredAt)
+ endTime, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt())
if err != nil {
return errors.NewFlyteAdminErrorf(
codes.Internal, "Failed to parse node execution occurred at timestamp: %v", err)
@@ -62,21 +62,21 @@ func addTerminalState(
}
// Serialize output results (if they exist)
- if request.Event.GetOutputUri() != "" {
+ if request.GetEvent().GetOutputUri() != "" {
closure.OutputResult = &admin.NodeExecutionClosure_OutputUri{
- OutputUri: request.Event.GetOutputUri(),
+ OutputUri: request.GetEvent().GetOutputUri(),
}
- } else if request.Event.GetOutputData() != nil {
+ } else if request.GetEvent().GetOutputData() != nil {
switch inlineEventDataPolicy {
case interfaces.InlineEventDataPolicyStoreInline:
closure.OutputResult = &admin.NodeExecutionClosure_OutputData{
- OutputData: request.Event.GetOutputData(),
+ OutputData: request.GetEvent().GetOutputData(),
}
default:
logger.Debugf(ctx, "Offloading outputs per InlineEventDataPolicy")
- uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetOutputData(),
- request.Event.Id.ExecutionId.Project, request.Event.Id.ExecutionId.Domain, request.Event.Id.ExecutionId.Name,
- request.Event.Id.NodeId, OutputsObjectSuffix)
+ uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetOutputData(),
+ request.GetEvent().GetId().GetExecutionId().GetProject(), request.GetEvent().GetId().GetExecutionId().GetDomain(), request.GetEvent().GetId().GetExecutionId().GetName(),
+ request.GetEvent().GetId().GetNodeId(), OutputsObjectSuffix)
if err != nil {
return err
}
@@ -84,15 +84,15 @@ func addTerminalState(
OutputUri: uri.String(),
}
}
- } else if request.Event.GetError() != nil {
+ } else if request.GetEvent().GetError() != nil {
closure.OutputResult = &admin.NodeExecutionClosure_Error{
- Error: request.Event.GetError(),
+ Error: request.GetEvent().GetError(),
}
- k := request.Event.GetError().Kind.String()
+ k := request.GetEvent().GetError().GetKind().String()
nodeExecutionModel.ErrorKind = &k
nodeExecutionModel.ErrorCode = &request.Event.GetError().Code
}
- closure.DeckUri = request.Event.DeckUri
+ closure.DeckUri = request.GetEvent().GetDeckUri()
return nil
}
@@ -100,47 +100,47 @@ func addTerminalState(
func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInput) (*models.NodeExecution, error) {
nodeExecution := &models.NodeExecution{
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: input.Request.Event.Id.NodeId,
+ NodeID: input.Request.GetEvent().GetId().GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: input.Request.Event.Id.ExecutionId.Project,
- Domain: input.Request.Event.Id.ExecutionId.Domain,
- Name: input.Request.Event.Id.ExecutionId.Name,
+ Project: input.Request.GetEvent().GetId().GetExecutionId().GetProject(),
+ Domain: input.Request.GetEvent().GetId().GetExecutionId().GetDomain(),
+ Name: input.Request.GetEvent().GetId().GetExecutionId().GetName(),
},
},
- Phase: input.Request.Event.Phase.String(),
+ Phase: input.Request.GetEvent().GetPhase().String(),
}
- reportedAt := input.Request.Event.ReportedAt
- if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) {
- reportedAt = input.Request.Event.OccurredAt
+ reportedAt := input.Request.GetEvent().GetReportedAt()
+ if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) {
+ reportedAt = input.Request.GetEvent().GetOccurredAt()
}
closure := admin.NodeExecutionClosure{
- Phase: input.Request.Event.Phase,
- CreatedAt: input.Request.Event.OccurredAt,
+ Phase: input.Request.GetEvent().GetPhase(),
+ CreatedAt: input.Request.GetEvent().GetOccurredAt(),
UpdatedAt: reportedAt,
}
nodeExecutionMetadata := admin.NodeExecutionMetaData{
- RetryGroup: input.Request.Event.RetryGroup,
- SpecNodeId: input.Request.Event.SpecNodeId,
- IsParentNode: input.Request.Event.IsParent,
- IsDynamic: input.Request.Event.IsDynamic,
- IsArray: input.Request.Event.IsArray,
+ RetryGroup: input.Request.GetEvent().GetRetryGroup(),
+ SpecNodeId: input.Request.GetEvent().GetSpecNodeId(),
+ IsParentNode: input.Request.GetEvent().GetIsParent(),
+ IsDynamic: input.Request.GetEvent().GetIsDynamic(),
+ IsArray: input.Request.GetEvent().GetIsArray(),
}
err := handleNodeExecutionInputs(ctx, nodeExecution, input.Request, input.StorageClient)
if err != nil {
return nil, err
}
- if input.Request.Event.Phase == core.NodeExecution_RUNNING {
+ if input.Request.GetEvent().GetPhase() == core.NodeExecution_RUNNING {
err := addNodeRunningState(input.Request, nodeExecution, &closure)
if err != nil {
return nil, err
}
}
- if common.IsNodeExecutionTerminal(input.Request.Event.Phase) {
+ if common.IsNodeExecutionTerminal(input.Request.GetEvent().GetPhase()) {
err := addTerminalState(ctx, input.Request, nodeExecution, &closure, input.InlineEventDataPolicy, input.StorageClient)
if err != nil {
return nil, err
@@ -148,16 +148,16 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp
}
// Update TaskNodeMetadata, which includes caching information today.
- if input.Request.Event.GetTaskNodeMetadata() != nil {
+ if input.Request.GetEvent().GetTaskNodeMetadata() != nil {
targetMetadata := &admin.NodeExecutionClosure_TaskNodeMetadata{
TaskNodeMetadata: &admin.TaskNodeMetadata{
- CheckpointUri: input.Request.Event.GetTaskNodeMetadata().CheckpointUri,
+ CheckpointUri: input.Request.GetEvent().GetTaskNodeMetadata().GetCheckpointUri(),
},
}
- if input.Request.Event.GetTaskNodeMetadata().CatalogKey != nil {
- st := input.Request.Event.GetTaskNodeMetadata().GetCacheStatus().String()
- targetMetadata.TaskNodeMetadata.CacheStatus = input.Request.Event.GetTaskNodeMetadata().GetCacheStatus()
- targetMetadata.TaskNodeMetadata.CatalogKey = input.Request.Event.GetTaskNodeMetadata().GetCatalogKey()
+ if input.Request.GetEvent().GetTaskNodeMetadata().GetCatalogKey() != nil {
+ st := input.Request.GetEvent().GetTaskNodeMetadata().GetCacheStatus().String()
+ targetMetadata.TaskNodeMetadata.CacheStatus = input.Request.GetEvent().GetTaskNodeMetadata().GetCacheStatus()
+ targetMetadata.TaskNodeMetadata.CatalogKey = input.Request.GetEvent().GetTaskNodeMetadata().GetCatalogKey()
nodeExecution.CacheStatus = &st
}
closure.TargetMetadata = targetMetadata
@@ -175,7 +175,7 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp
}
nodeExecution.Closure = marshaledClosure
nodeExecution.NodeExecutionMetadata = marshaledNodeExecutionMetadata
- nodeExecutionCreatedAt, err := ptypes.Timestamp(input.Request.Event.OccurredAt)
+ nodeExecutionCreatedAt, err := ptypes.Timestamp(input.Request.GetEvent().GetOccurredAt())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event timestamp")
}
@@ -185,14 +185,14 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event reported_at timestamp")
}
nodeExecution.NodeExecutionUpdatedAt = &nodeExecutionUpdatedAt
- if input.Request.Event.ParentTaskMetadata != nil {
+ if input.Request.GetEvent().GetParentTaskMetadata() != nil {
nodeExecution.ParentTaskExecutionID = input.ParentTaskExecutionID
}
nodeExecution.ParentID = input.ParentID
nodeExecution.DynamicWorkflowRemoteClosureReference = input.DynamicWorkflowRemoteClosure
internalData := &genModel.NodeExecutionInternalData{
- EventVersion: input.Request.Event.EventVersion,
+ EventVersion: input.Request.GetEvent().GetEventVersion(),
}
internalDataBytes, err := proto.Marshal(internalData)
if err != nil {
@@ -216,21 +216,21 @@ func UpdateNodeExecutionModel(
return errors.NewFlyteAdminErrorf(codes.Internal,
"failed to unmarshal node execution closure with error: %+v", err)
}
- nodeExecutionModel.Phase = request.Event.Phase.String()
- nodeExecutionClosure.Phase = request.Event.Phase
- reportedAt := request.Event.ReportedAt
- if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) {
- reportedAt = request.Event.OccurredAt
+ nodeExecutionModel.Phase = request.GetEvent().GetPhase().String()
+ nodeExecutionClosure.Phase = request.GetEvent().GetPhase()
+ reportedAt := request.GetEvent().GetReportedAt()
+ if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) {
+ reportedAt = request.GetEvent().GetOccurredAt()
}
nodeExecutionClosure.UpdatedAt = reportedAt
- if request.Event.Phase == core.NodeExecution_RUNNING {
+ if request.GetEvent().GetPhase() == core.NodeExecution_RUNNING {
err := addNodeRunningState(request, nodeExecutionModel, &nodeExecutionClosure)
if err != nil {
return err
}
}
- if common.IsNodeExecutionTerminal(request.Event.Phase) {
+ if common.IsNodeExecutionTerminal(request.GetEvent().GetPhase()) {
err := addTerminalState(ctx, request, nodeExecutionModel, &nodeExecutionClosure, inlineEventDataPolicy, storageClient)
if err != nil {
return err
@@ -247,24 +247,24 @@ func UpdateNodeExecutionModel(
}
// Update TaskNodeMetadata, which includes caching information today.
- if request.Event.GetTaskNodeMetadata() != nil {
+ if request.GetEvent().GetTaskNodeMetadata() != nil {
targetMetadata := &admin.NodeExecutionClosure_TaskNodeMetadata{
TaskNodeMetadata: &admin.TaskNodeMetadata{
- CheckpointUri: request.Event.GetTaskNodeMetadata().CheckpointUri,
+ CheckpointUri: request.GetEvent().GetTaskNodeMetadata().GetCheckpointUri(),
},
}
- if request.Event.GetTaskNodeMetadata().CatalogKey != nil {
- st := request.Event.GetTaskNodeMetadata().GetCacheStatus().String()
- targetMetadata.TaskNodeMetadata.CacheStatus = request.Event.GetTaskNodeMetadata().GetCacheStatus()
- targetMetadata.TaskNodeMetadata.CatalogKey = request.Event.GetTaskNodeMetadata().GetCatalogKey()
+ if request.GetEvent().GetTaskNodeMetadata().GetCatalogKey() != nil {
+ st := request.GetEvent().GetTaskNodeMetadata().GetCacheStatus().String()
+ targetMetadata.TaskNodeMetadata.CacheStatus = request.GetEvent().GetTaskNodeMetadata().GetCacheStatus()
+ targetMetadata.TaskNodeMetadata.CatalogKey = request.GetEvent().GetTaskNodeMetadata().GetCatalogKey()
nodeExecutionModel.CacheStatus = &st
}
nodeExecutionClosure.TargetMetadata = targetMetadata
// if this is a dynamic task then maintain the DynamicJobSpecUri
- dynamicWorkflowMetadata := request.Event.GetTaskNodeMetadata().DynamicWorkflow
- if dynamicWorkflowMetadata != nil && len(dynamicWorkflowMetadata.DynamicJobSpecUri) > 0 {
- nodeExecutionClosure.DynamicJobSpecUri = dynamicWorkflowMetadata.DynamicJobSpecUri
+ dynamicWorkflowMetadata := request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow()
+ if dynamicWorkflowMetadata != nil && len(dynamicWorkflowMetadata.GetDynamicJobSpecUri()) > 0 {
+ nodeExecutionClosure.DynamicJobSpecUri = dynamicWorkflowMetadata.GetDynamicJobSpecUri()
}
}
@@ -284,7 +284,7 @@ func UpdateNodeExecutionModel(
// In the case of dynamic nodes reporting DYNAMIC_RUNNING, the IsParent and IsDynamic bits will be set for this event.
// Update the node execution metadata accordingly.
- if request.Event.IsParent || request.Event.IsDynamic || request.Event.IsArray {
+ if request.GetEvent().GetIsParent() || request.GetEvent().GetIsDynamic() || request.GetEvent().GetIsArray() {
var nodeExecutionMetadata admin.NodeExecutionMetaData
if len(nodeExecutionModel.NodeExecutionMetadata) > 0 {
if err := proto.Unmarshal(nodeExecutionModel.NodeExecutionMetadata, &nodeExecutionMetadata); err != nil {
@@ -294,13 +294,13 @@ func UpdateNodeExecutionModel(
}
// Not every event sends IsParent and IsDynamic as an artifact of how propeller handles dynamic nodes.
// Only explicitly set the fields, when they're set in the event itself.
- if request.Event.IsParent {
+ if request.GetEvent().GetIsParent() {
nodeExecutionMetadata.IsParentNode = true
}
- if request.Event.IsDynamic {
+ if request.GetEvent().GetIsDynamic() {
nodeExecutionMetadata.IsDynamic = true
}
- if request.Event.IsArray {
+ if request.GetEvent().GetIsArray() {
nodeExecutionMetadata.IsArray = true
}
nodeExecMetadataBytes, err := proto.Marshal(&nodeExecutionMetadata)
@@ -321,7 +321,7 @@ func FromNodeExecutionModel(nodeExecutionModel models.NodeExecution, opts *Execu
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure")
}
- if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 {
+ if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().GetMessage()) > 0 {
trimmedErrOutputResult := closure.GetError()
trimmedErrMessage := TrimErrorMessage(trimmedErrOutputResult.GetMessage())
trimmedErrOutputResult.Message = trimmedErrMessage
@@ -337,7 +337,7 @@ func FromNodeExecutionModel(nodeExecutionModel models.NodeExecution, opts *Execu
}
// TODO: delete this block and references to preloading child node executions no earlier than Q3 2022
// This is required for historical reasons because propeller did not always send IsParent or IsDynamic in events.
- if !(nodeExecutionMetadata.IsParentNode || nodeExecutionMetadata.IsDynamic) {
+ if !(nodeExecutionMetadata.GetIsParentNode() || nodeExecutionMetadata.GetIsDynamic()) {
if len(nodeExecutionModel.ChildNodeExecutions) > 0 {
nodeExecutionMetadata.IsParentNode = true
if len(nodeExecutionModel.DynamicWorkflowRemoteClosureReference) > 0 {
@@ -380,14 +380,14 @@ func handleNodeExecutionInputs(ctx context.Context,
// Inputs are static over the duration of the node execution, no need to update them when they're already set
return nil
}
- switch request.Event.GetInputValue().(type) {
+ switch request.GetEvent().GetInputValue().(type) {
case *event.NodeExecutionEvent_InputUri:
- logger.Debugf(ctx, "saving node execution input URI [%s]", request.Event.GetInputUri())
- nodeExecutionModel.InputURI = request.Event.GetInputUri()
+ logger.Debugf(ctx, "saving node execution input URI [%s]", request.GetEvent().GetInputUri())
+ nodeExecutionModel.InputURI = request.GetEvent().GetInputUri()
case *event.NodeExecutionEvent_InputData:
- uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetInputData(),
- request.Event.Id.ExecutionId.Project, request.Event.Id.ExecutionId.Domain, request.Event.Id.ExecutionId.Name,
- request.Event.Id.NodeId, InputsObjectSuffix)
+ uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetInputData(),
+ request.GetEvent().GetId().GetExecutionId().GetProject(), request.GetEvent().GetId().GetExecutionId().GetDomain(), request.GetEvent().GetId().GetExecutionId().GetName(),
+ request.GetEvent().GetId().GetNodeId(), InputsObjectSuffix)
if err != nil {
return err
}
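The mechanical switch above from direct field access to generated getters is the substance of this change: protoc-generated getters tolerate nil receivers, so a chain like request.GetEvent().GetId().GetExecutionId().GetProject() yields zero values instead of panicking when an intermediate message is absent. A minimal, self-contained sketch with stand-in types (not code from this repository) of the behavior those getters provide:

// Hypothetical mirror of protoc-gen-go output; type and field names are illustrative only.
package main

import "fmt"

type ExecutionID struct {
	Project string
}

// Generated-style getter: safe to call on a nil receiver.
func (e *ExecutionID) GetProject() string {
	if e == nil {
		return ""
	}
	return e.Project
}

type Event struct {
	ExecutionId *ExecutionID
}

func (ev *Event) GetExecutionId() *ExecutionID {
	if ev == nil {
		return nil
	}
	return ev.ExecutionId
}

type Request struct {
	Event *Event
}

func (r *Request) GetEvent() *Event {
	if r == nil {
		return nil
	}
	return r.Event
}

func main() {
	var req *Request // nil request, as a caller might pass

	// Getter chaining degrades to zero values instead of panicking.
	fmt.Printf("project=%q\n", req.GetEvent().GetExecutionId().GetProject())

	// The equivalent direct field access would dereference nil pointers:
	// _ = req.Event.ExecutionId.Project
}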
diff --git a/flyteadmin/pkg/repositories/transformers/node_execution_event.go b/flyteadmin/pkg/repositories/transformers/node_execution_event.go
index c817054499..88c4d8be0e 100644
--- a/flyteadmin/pkg/repositories/transformers/node_execution_event.go
+++ b/flyteadmin/pkg/repositories/transformers/node_execution_event.go
@@ -11,21 +11,21 @@ import (
// Transforms a NodeExecutionEventRequest to a NodeExecutionEvent model
func CreateNodeExecutionEventModel(request *admin.NodeExecutionEventRequest) (*models.NodeExecutionEvent, error) {
- occurredAt, err := ptypes.Timestamp(request.Event.OccurredAt)
+ occurredAt, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to marshal occurred at timestamp")
}
return &models.NodeExecutionEvent{
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: request.Event.Id.NodeId,
+ NodeID: request.GetEvent().GetId().GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: request.Event.Id.ExecutionId.Project,
- Domain: request.Event.Id.ExecutionId.Domain,
- Name: request.Event.Id.ExecutionId.Name,
+ Project: request.GetEvent().GetId().GetExecutionId().GetProject(),
+ Domain: request.GetEvent().GetId().GetExecutionId().GetDomain(),
+ Name: request.GetEvent().GetId().GetExecutionId().GetName(),
},
},
- RequestID: request.RequestId,
+ RequestID: request.GetRequestId(),
OccurredAt: occurredAt,
- Phase: request.Event.Phase.String(),
+ Phase: request.GetEvent().GetPhase().String(),
}, nil
}
diff --git a/flyteadmin/pkg/repositories/transformers/node_execution_test.go b/flyteadmin/pkg/repositories/transformers/node_execution_test.go
index a52c8e76a3..e37d312612 100644
--- a/flyteadmin/pkg/repositories/transformers/node_execution_test.go
+++ b/flyteadmin/pkg/repositories/transformers/node_execution_test.go
@@ -72,7 +72,7 @@ func TestAddRunningState(t *testing.T) {
err := addNodeRunningState(&request, &nodeExecutionModel, &closure)
assert.Nil(t, err)
assert.Equal(t, startedAt, *nodeExecutionModel.StartedAt)
- assert.True(t, proto.Equal(startedAtProto, closure.StartedAt))
+ assert.True(t, proto.Equal(startedAtProto, closure.GetStartedAt()))
}
func TestAddTerminalState_OutputURI(t *testing.T) {
@@ -251,9 +251,9 @@ func TestCreateNodeExecutionModel(t *testing.T) {
UpdatedAt: occurredAtProto,
TargetMetadata: &admin.NodeExecutionClosure_TaskNodeMetadata{
TaskNodeMetadata: &admin.TaskNodeMetadata{
- CacheStatus: request.Event.GetTaskNodeMetadata().CacheStatus,
- CatalogKey: request.Event.GetTaskNodeMetadata().CatalogKey,
- CheckpointUri: request.Event.GetTaskNodeMetadata().CheckpointUri,
+ CacheStatus: request.GetEvent().GetTaskNodeMetadata().GetCacheStatus(),
+ CatalogKey: request.GetEvent().GetTaskNodeMetadata().GetCatalogKey(),
+ CheckpointUri: request.GetEvent().GetTaskNodeMetadata().GetCheckpointUri(),
},
},
}
@@ -266,7 +266,7 @@ func TestCreateNodeExecutionModel(t *testing.T) {
EventVersion: 2,
}
internalDataBytes, _ := proto.Marshal(internalData)
- cacheStatus := request.Event.GetTaskNodeMetadata().CacheStatus.String()
+ cacheStatus := request.GetEvent().GetTaskNodeMetadata().GetCacheStatus().String()
assert.Equal(t, &models.NodeExecution{
NodeExecutionKey: models.NodeExecutionKey{
NodeID: "node id",
@@ -383,7 +383,7 @@ func TestUpdateNodeExecutionModel(t *testing.T) {
assert.Equal(t, occurredAt, *nodeExecutionModel.StartedAt)
assert.EqualValues(t, occurredAt, *nodeExecutionModel.NodeExecutionUpdatedAt)
assert.NotNil(t, nodeExecutionModel.CacheStatus)
- assert.Equal(t, *nodeExecutionModel.CacheStatus, request.Event.GetTaskNodeMetadata().CacheStatus.String())
+ assert.Equal(t, *nodeExecutionModel.CacheStatus, request.GetEvent().GetTaskNodeMetadata().GetCacheStatus().String())
assert.Equal(t, nodeExecutionModel.DynamicWorkflowRemoteClosureReference, dynamicWorkflowClosureRef)
var closure = &admin.NodeExecutionClosure{
@@ -392,12 +392,12 @@ func TestUpdateNodeExecutionModel(t *testing.T) {
UpdatedAt: occurredAtProto,
TargetMetadata: &admin.NodeExecutionClosure_TaskNodeMetadata{
TaskNodeMetadata: &admin.TaskNodeMetadata{
- CacheStatus: request.Event.GetTaskNodeMetadata().CacheStatus,
- CatalogKey: request.Event.GetTaskNodeMetadata().CatalogKey,
- CheckpointUri: request.Event.GetTaskNodeMetadata().CheckpointUri,
+ CacheStatus: request.GetEvent().GetTaskNodeMetadata().GetCacheStatus(),
+ CatalogKey: request.GetEvent().GetTaskNodeMetadata().GetCatalogKey(),
+ CheckpointUri: request.GetEvent().GetTaskNodeMetadata().GetCheckpointUri(),
},
},
- DynamicJobSpecUri: request.Event.GetTaskNodeMetadata().DynamicWorkflow.DynamicJobSpecUri,
+ DynamicJobSpecUri: request.GetEvent().GetTaskNodeMetadata().GetDynamicWorkflow().GetDynamicJobSpecUri(),
}
var closureBytes, _ = proto.Marshal(closure)
assert.Equal(t, nodeExecutionModel.Closure, closureBytes)
@@ -553,7 +553,7 @@ func TestFromNodeExecutionModel_Error(t *testing.T) {
expectedExecErr := execErr
expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen))
assert.Nil(t, err)
- assert.True(t, proto.Equal(expectedExecErr, nodeExecution.Closure.GetError()))
+ assert.True(t, proto.Equal(expectedExecErr, nodeExecution.GetClosure().GetError()))
}
func TestFromNodeExecutionModelWithChildren(t *testing.T) {
diff --git a/flyteadmin/pkg/repositories/transformers/project.go b/flyteadmin/pkg/repositories/transformers/project.go
index e1e6e90f84..b9690cecc5 100644
--- a/flyteadmin/pkg/repositories/transformers/project.go
+++ b/flyteadmin/pkg/repositories/transformers/project.go
@@ -14,12 +14,12 @@ type CreateProjectModelInput struct {
}
func CreateProjectModel(project *admin.Project) models.Project {
- stateInt := int32(project.State)
- if project.Labels == nil {
+ stateInt := int32(project.GetState())
+ if project.GetLabels() == nil {
return models.Project{
- Identifier: project.Id,
- Name: project.Name,
- Description: project.Description,
+ Identifier: project.GetId(),
+ Name: project.GetName(),
+ Description: project.GetDescription(),
State: &stateInt,
}
}
@@ -28,9 +28,9 @@ func CreateProjectModel(project *admin.Project) models.Project {
return models.Project{}
}
return models.Project{
- Identifier: project.Id,
- Name: project.Name,
- Description: project.Description,
+ Identifier: project.GetId(),
+ Name: project.GetName(),
+ Description: project.GetDescription(),
Labels: projectBytes,
State: &stateInt,
}
@@ -46,7 +46,7 @@ func FromProjectModel(projectModel models.Project, domains []*admin.Domain) *adm
Id: projectModel.Identifier,
Name: projectModel.Name,
Description: projectModel.Description,
- Labels: projectDeserialized.Labels,
+ Labels: projectDeserialized.GetLabels(),
State: admin.Project_ProjectState(*projectModel.State),
}
project.Domains = domains
diff --git a/flyteadmin/pkg/repositories/transformers/project_test.go b/flyteadmin/pkg/repositories/transformers/project_test.go
index 914194f1dc..d9bedb2038 100644
--- a/flyteadmin/pkg/repositories/transformers/project_test.go
+++ b/flyteadmin/pkg/repositories/transformers/project_test.go
@@ -95,10 +95,10 @@ func TestFromProjectModels(t *testing.T) {
projects := FromProjectModels(projectModels, domains)
assert.Len(t, projects, 2)
for index, project := range projects {
- assert.Equal(t, fmt.Sprintf("proj%v_id", index+1), project.Id)
- assert.Equal(t, fmt.Sprintf("proj%v_name", index+1), project.Name)
- assert.Equal(t, fmt.Sprintf("proj%v_description", index+1), project.Description)
- assert.Equal(t, admin.Project_ACTIVE, project.State)
- assert.EqualValues(t, domains, project.Domains)
+ assert.Equal(t, fmt.Sprintf("proj%v_id", index+1), project.GetId())
+ assert.Equal(t, fmt.Sprintf("proj%v_name", index+1), project.GetName())
+ assert.Equal(t, fmt.Sprintf("proj%v_description", index+1), project.GetDescription())
+ assert.Equal(t, admin.Project_ACTIVE, project.GetState())
+ assert.EqualValues(t, domains, project.GetDomains())
}
}
diff --git a/flyteadmin/pkg/repositories/transformers/resource.go b/flyteadmin/pkg/repositories/transformers/resource.go
index 36b5ddbd6a..4b4a226c92 100644
--- a/flyteadmin/pkg/repositories/transformers/resource.go
+++ b/flyteadmin/pkg/repositories/transformers/resource.go
@@ -14,14 +14,14 @@ import (
)
func WorkflowAttributesToResourceModel(attributes *admin.WorkflowAttributes, resource admin.MatchableResource) (models.Resource, error) {
- attributeBytes, err := proto.Marshal(attributes.MatchingAttributes)
+ attributeBytes, err := proto.Marshal(attributes.GetMatchingAttributes())
if err != nil {
return models.Resource{}, err
}
return models.Resource{
- Project: attributes.Project,
- Domain: attributes.Domain,
- Workflow: attributes.Workflow,
+ Project: attributes.GetProject(),
+ Domain: attributes.GetDomain(),
+ Workflow: attributes.GetWorkflow(),
ResourceType: resource.String(),
Priority: models.ResourcePriorityWorkflowLevel,
Attributes: attributeBytes,
@@ -31,15 +31,15 @@ func WorkflowAttributesToResourceModel(attributes *admin.WorkflowAttributes, res
func mergeUpdatePluginOverrides(existingAttributes *admin.MatchingAttributes,
newMatchingAttributes *admin.MatchingAttributes) *admin.MatchingAttributes {
taskPluginOverrides := make(map[string]*admin.PluginOverride)
- if existingAttributes.GetPluginOverrides() != nil && len(existingAttributes.GetPluginOverrides().Overrides) > 0 {
- for _, pluginOverride := range existingAttributes.GetPluginOverrides().Overrides {
- taskPluginOverrides[pluginOverride.TaskType] = pluginOverride
+ if existingAttributes.GetPluginOverrides() != nil && len(existingAttributes.GetPluginOverrides().GetOverrides()) > 0 {
+ for _, pluginOverride := range existingAttributes.GetPluginOverrides().GetOverrides() {
+ taskPluginOverrides[pluginOverride.GetTaskType()] = pluginOverride
}
}
if newMatchingAttributes.GetPluginOverrides() != nil &&
- len(newMatchingAttributes.GetPluginOverrides().Overrides) > 0 {
- for _, pluginOverride := range newMatchingAttributes.GetPluginOverrides().Overrides {
- taskPluginOverrides[pluginOverride.TaskType] = pluginOverride
+ len(newMatchingAttributes.GetPluginOverrides().GetOverrides()) > 0 {
+ for _, pluginOverride := range newMatchingAttributes.GetPluginOverrides().GetOverrides() {
+ taskPluginOverrides[pluginOverride.GetTaskType()] = pluginOverride
}
}
@@ -99,13 +99,13 @@ func FromResourceModelToWorkflowAttributes(model models.Resource) (admin.Workflo
}
func ProjectDomainAttributesToResourceModel(attributes *admin.ProjectDomainAttributes, resource admin.MatchableResource) (models.Resource, error) {
- attributeBytes, err := proto.Marshal(attributes.MatchingAttributes)
+ attributeBytes, err := proto.Marshal(attributes.GetMatchingAttributes())
if err != nil {
return models.Resource{}, err
}
return models.Resource{
- Project: attributes.Project,
- Domain: attributes.Domain,
+ Project: attributes.GetProject(),
+ Domain: attributes.GetDomain(),
ResourceType: resource.String(),
Priority: models.ResourcePriorityProjectDomainLevel,
Attributes: attributeBytes,
@@ -113,12 +113,12 @@ func ProjectDomainAttributesToResourceModel(attributes *admin.ProjectDomainAttri
}
func ProjectAttributesToResourceModel(attributes *admin.ProjectAttributes, resource admin.MatchableResource) (models.Resource, error) {
- attributeBytes, err := proto.Marshal(attributes.MatchingAttributes)
+ attributeBytes, err := proto.Marshal(attributes.GetMatchingAttributes())
if err != nil {
return models.Resource{}, err
}
return models.Resource{
- Project: attributes.Project,
+ Project: attributes.GetProject(),
ResourceType: resource.String(),
Priority: models.ResourcePriorityProjectLevel,
Attributes: attributeBytes,
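The map-based merge in mergeUpdatePluginOverrides above keys overrides by task type so that incoming attributes replace existing entries per type. A rough, self-contained sketch of that strategy with simplified stand-in types (not the repository's code):

package main

import "fmt"

type PluginOverride struct {
	TaskType string
	PluginID []string
}

// mergeOverrides indexes existing overrides by task type, then lets the
// incoming set overwrite matching entries; unmatched entries survive.
func mergeOverrides(existing, latest []PluginOverride) []PluginOverride {
	byTaskType := make(map[string]PluginOverride)
	for _, o := range existing {
		byTaskType[o.TaskType] = o
	}
	for _, o := range latest {
		byTaskType[o.TaskType] = o // newer attributes win per task type
	}
	merged := make([]PluginOverride, 0, len(byTaskType))
	for _, o := range byTaskType {
		merged = append(merged, o)
	}
	return merged
}

func main() {
	existing := []PluginOverride{{TaskType: "python", PluginID: []string{"plugin_a"}}}
	latest := []PluginOverride{{TaskType: "python", PluginID: []string{"plugin_b"}}}
	fmt.Println(mergeOverrides(existing, latest)) // single "python" entry, plugin_b wins
}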
diff --git a/flyteadmin/pkg/repositories/transformers/resource_test.go b/flyteadmin/pkg/repositories/transformers/resource_test.go
index 6efcc89fc0..a1ef2cacef 100644
--- a/flyteadmin/pkg/repositories/transformers/resource_test.go
+++ b/flyteadmin/pkg/repositories/transformers/resource_test.go
@@ -97,15 +97,15 @@ func TestMergeUpdateProjectDomainAttributes(t *testing.T) {
assert.NoError(t, err)
var sawPythonTask, sawSidecarTask, sawHiveTask bool
for _, override := range updatedAttributes.GetPluginOverrides().GetOverrides() {
- if override.TaskType == "python" {
+ if override.GetTaskType() == "python" {
sawPythonTask = true
- assert.EqualValues(t, []string{"plugin_a"}, override.PluginId)
- } else if override.TaskType == "sidecar" {
+ assert.EqualValues(t, []string{"plugin_a"}, override.GetPluginId())
+ } else if override.GetTaskType() == "sidecar" {
sawSidecarTask = true
- assert.EqualValues(t, []string{"plugin_c"}, override.PluginId)
- } else if override.TaskType == "hive" {
+ assert.EqualValues(t, []string{"plugin_c"}, override.GetPluginId())
+ } else if override.GetTaskType() == "hive" {
sawHiveTask = true
- assert.EqualValues(t, []string{"plugin_d"}, override.PluginId)
+ assert.EqualValues(t, []string{"plugin_d"}, override.GetPluginId())
}
}
assert.True(t, sawPythonTask, "Missing python task from finalized attributes")
@@ -194,15 +194,15 @@ func TestMergeUpdateWorkflowAttributes(t *testing.T) {
assert.NoError(t, err)
var sawPythonTask, sawSidecarTask, sawHiveTask bool
for _, override := range updatedAttributes.GetPluginOverrides().GetOverrides() {
- if override.TaskType == "python" {
+ if override.GetTaskType() == "python" {
sawPythonTask = true
- assert.EqualValues(t, []string{"plugin_a"}, override.PluginId)
- } else if override.TaskType == "sidecar" {
+ assert.EqualValues(t, []string{"plugin_a"}, override.GetPluginId())
+ } else if override.GetTaskType() == "sidecar" {
sawSidecarTask = true
- assert.EqualValues(t, []string{"plugin_c"}, override.PluginId)
- } else if override.TaskType == "hive" {
+ assert.EqualValues(t, []string{"plugin_c"}, override.GetPluginId())
+ } else if override.GetTaskType() == "hive" {
sawHiveTask = true
- assert.EqualValues(t, []string{"plugin_d"}, override.PluginId)
+ assert.EqualValues(t, []string{"plugin_d"}, override.GetPluginId())
}
}
assert.True(t, sawPythonTask, "Missing python task from finalized attributes")
diff --git a/flyteadmin/pkg/repositories/transformers/signal.go b/flyteadmin/pkg/repositories/transformers/signal.go
index bbef0a00eb..5cb1b37ef4 100644
--- a/flyteadmin/pkg/repositories/transformers/signal.go
+++ b/flyteadmin/pkg/repositories/transformers/signal.go
@@ -14,21 +14,21 @@ func CreateSignalModel(signalID *core.SignalIdentifier, signalType *core.Literal
signalModel := models.Signal{}
if signalID != nil {
signalKey := &signalModel.SignalKey
- if signalID.ExecutionId != nil {
+ if signalID.GetExecutionId() != nil {
executionKey := &signalKey.ExecutionKey
- if len(signalID.ExecutionId.Project) > 0 {
- executionKey.Project = signalID.ExecutionId.Project
+ if len(signalID.GetExecutionId().GetProject()) > 0 {
+ executionKey.Project = signalID.GetExecutionId().GetProject()
}
- if len(signalID.ExecutionId.Domain) > 0 {
- executionKey.Domain = signalID.ExecutionId.Domain
+ if len(signalID.GetExecutionId().GetDomain()) > 0 {
+ executionKey.Domain = signalID.GetExecutionId().GetDomain()
}
- if len(signalID.ExecutionId.Name) > 0 {
- executionKey.Name = signalID.ExecutionId.Name
+ if len(signalID.GetExecutionId().GetName()) > 0 {
+ executionKey.Name = signalID.GetExecutionId().GetName()
}
}
- if len(signalID.SignalId) > 0 {
- signalKey.SignalID = signalID.SignalId
+ if len(signalID.GetSignalId()) > 0 {
+ signalKey.SignalID = signalID.GetSignalId()
}
}
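CreateSignalModel above copies only the identifier fields that are actually set, so a partially specified identifier still produces a usable lookup key. A simplified, hypothetical sketch of that pattern (stand-in types, not the flyteidl messages):

package main

import "fmt"

type ExecutionKey struct{ Project, Domain, Name string }

type SignalKey struct {
	ExecutionKey ExecutionKey
	SignalID     string
}

type SignalIdentifier struct {
	Project, Domain, Name string
	SignalID              string
}

// buildSignalKey copies only populated fields, mirroring how the transformer
// assembles a partial key that can double as a filter.
func buildSignalKey(id *SignalIdentifier) SignalKey {
	var key SignalKey
	if id == nil {
		return key
	}
	if id.Project != "" {
		key.ExecutionKey.Project = id.Project
	}
	if id.Domain != "" {
		key.ExecutionKey.Domain = id.Domain
	}
	if id.Name != "" {
		key.ExecutionKey.Name = id.Name
	}
	if id.SignalID != "" {
		key.SignalID = id.SignalID
	}
	return key
}

func main() {
	fmt.Printf("%+v\n", buildSignalKey(&SignalIdentifier{Project: "flytesnacks", SignalID: "approve"}))
}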
diff --git a/flyteadmin/pkg/repositories/transformers/signal_test.go b/flyteadmin/pkg/repositories/transformers/signal_test.go
index a54d5f1437..82637e06f3 100644
--- a/flyteadmin/pkg/repositories/transformers/signal_test.go
+++ b/flyteadmin/pkg/repositories/transformers/signal_test.go
@@ -82,7 +82,7 @@ func TestCreateSignalModel(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- signalModel, err := CreateSignalModel(test.proto.Id, test.proto.Type, test.proto.Value)
+ signalModel, err := CreateSignalModel(test.proto.GetId(), test.proto.GetType(), test.proto.GetValue())
assert.NoError(t, err)
assert.Equal(t, test.model, signalModel)
diff --git a/flyteadmin/pkg/repositories/transformers/task.go b/flyteadmin/pkg/repositories/transformers/task.go
index a8baf355e7..6c64ee2ec0 100644
--- a/flyteadmin/pkg/repositories/transformers/task.go
+++ b/flyteadmin/pkg/repositories/transformers/task.go
@@ -22,15 +22,15 @@ func CreateTaskModel(
return models.Task{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize task closure")
}
var taskType string
- if taskClosure.CompiledTask != nil && taskClosure.CompiledTask.Template != nil {
- taskType = taskClosure.CompiledTask.Template.Type
+ if taskClosure.GetCompiledTask() != nil && taskClosure.GetCompiledTask().GetTemplate() != nil {
+ taskType = taskClosure.GetCompiledTask().GetTemplate().GetType()
}
return models.Task{
TaskKey: models.TaskKey{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
- Version: request.Id.Version,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
+ Version: request.GetId().GetVersion(),
},
Closure: closureBytes,
Digest: digest,
diff --git a/flyteadmin/pkg/repositories/transformers/task_execution.go b/flyteadmin/pkg/repositories/transformers/task_execution.go
index 9f24ed2aa4..354291cd64 100644
--- a/flyteadmin/pkg/repositories/transformers/task_execution.go
+++ b/flyteadmin/pkg/repositories/transformers/task_execution.go
@@ -34,7 +34,7 @@ type CreateTaskExecutionModelInput struct {
func addTaskStartedState(request *admin.TaskExecutionEventRequest, taskExecutionModel *models.TaskExecution,
closure *admin.TaskExecutionClosure) error {
- occurredAt, err := ptypes.Timestamp(request.Event.OccurredAt)
+ occurredAt, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt())
if err != nil {
return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal occurredAt with error: %v", err)
}
@@ -43,7 +43,7 @@ func addTaskStartedState(request *admin.TaskExecutionEventRequest, taskExecution
// This check makes sure any out of order
if taskExecutionModel.StartedAt == nil {
taskExecutionModel.StartedAt = &occurredAt
- closure.StartedAt = request.Event.OccurredAt
+ closure.StartedAt = request.GetEvent().GetOccurredAt()
}
return nil
}
@@ -56,7 +56,7 @@ func addTaskTerminalState(
if taskExecutionModel.StartedAt == nil {
logger.Warning(context.Background(), "task execution is missing StartedAt")
} else {
- endTime, err := ptypes.Timestamp(request.Event.OccurredAt)
+ endTime, err := ptypes.Timestamp(request.GetEvent().GetOccurredAt())
if err != nil {
return errors.NewFlyteAdminErrorf(
codes.Internal, "Failed to parse task execution occurredAt timestamp: %v", err)
@@ -70,23 +70,23 @@ func addTaskTerminalState(
closure.Duration = ptypes.DurationProto(taskExecutionModel.Duration)
}
- if request.Event.GetOutputUri() != "" {
+ if request.GetEvent().GetOutputUri() != "" {
closure.OutputResult = &admin.TaskExecutionClosure_OutputUri{
- OutputUri: request.Event.GetOutputUri(),
+ OutputUri: request.GetEvent().GetOutputUri(),
}
- } else if request.Event.GetOutputData() != nil {
+ } else if request.GetEvent().GetOutputData() != nil {
switch inlineEventDataPolicy {
case interfaces.InlineEventDataPolicyStoreInline:
closure.OutputResult = &admin.TaskExecutionClosure_OutputData{
- OutputData: request.Event.GetOutputData(),
+ OutputData: request.GetEvent().GetOutputData(),
}
default:
logger.Debugf(ctx, "Offloading outputs per InlineEventDataPolicy")
- uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetOutputData(),
- request.Event.ParentNodeExecutionId.ExecutionId.Project, request.Event.ParentNodeExecutionId.ExecutionId.Domain,
- request.Event.ParentNodeExecutionId.ExecutionId.Name, request.Event.ParentNodeExecutionId.NodeId,
- request.Event.TaskId.Project, request.Event.TaskId.Domain, request.Event.TaskId.Name, request.Event.TaskId.Version,
- strconv.FormatUint(uint64(request.Event.RetryAttempt), 10), OutputsObjectSuffix)
+ uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetOutputData(),
+ request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetProject(), request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetDomain(),
+ request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetName(), request.GetEvent().GetParentNodeExecutionId().GetNodeId(),
+ request.GetEvent().GetTaskId().GetProject(), request.GetEvent().GetTaskId().GetDomain(), request.GetEvent().GetTaskId().GetName(), request.GetEvent().GetTaskId().GetVersion(),
+ strconv.FormatUint(uint64(request.GetEvent().GetRetryAttempt()), 10), OutputsObjectSuffix)
if err != nil {
return err
}
@@ -94,9 +94,9 @@ func addTaskTerminalState(
OutputUri: uri.String(),
}
}
- } else if request.Event.GetError() != nil {
+ } else if request.GetEvent().GetError() != nil {
closure.OutputResult = &admin.TaskExecutionClosure_Error{
- Error: request.Event.GetError(),
+ Error: request.GetEvent().GetError(),
}
}
return nil
@@ -106,35 +106,35 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode
taskExecution := &models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: input.Request.Event.TaskId.Project,
- Domain: input.Request.Event.TaskId.Domain,
- Name: input.Request.Event.TaskId.Name,
- Version: input.Request.Event.TaskId.Version,
+ Project: input.Request.GetEvent().GetTaskId().GetProject(),
+ Domain: input.Request.GetEvent().GetTaskId().GetDomain(),
+ Name: input.Request.GetEvent().GetTaskId().GetName(),
+ Version: input.Request.GetEvent().GetTaskId().GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: input.Request.Event.ParentNodeExecutionId.NodeId,
+ NodeID: input.Request.GetEvent().GetParentNodeExecutionId().GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: input.Request.Event.ParentNodeExecutionId.ExecutionId.Project,
- Domain: input.Request.Event.ParentNodeExecutionId.ExecutionId.Domain,
- Name: input.Request.Event.ParentNodeExecutionId.ExecutionId.Name,
+ Project: input.Request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetProject(),
+ Domain: input.Request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetDomain(),
+ Name: input.Request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetName(),
},
},
RetryAttempt: &input.Request.Event.RetryAttempt,
},
- Phase: input.Request.Event.Phase.String(),
- PhaseVersion: input.Request.Event.PhaseVersion,
+ Phase: input.Request.GetEvent().GetPhase().String(),
+ PhaseVersion: input.Request.GetEvent().GetPhaseVersion(),
}
err := handleTaskExecutionInputs(ctx, taskExecution, input.Request, input.StorageClient)
if err != nil {
return nil, err
}
- metadata := input.Request.Event.Metadata
- if metadata != nil && len(metadata.ExternalResources) > 1 {
- sort.Slice(metadata.ExternalResources, func(i, j int) bool {
- a := metadata.ExternalResources[i]
- b := metadata.ExternalResources[j]
+ metadata := input.Request.GetEvent().GetMetadata()
+ if metadata != nil && len(metadata.GetExternalResources()) > 1 {
+ sort.Slice(metadata.GetExternalResources(), func(i, j int) bool {
+ a := metadata.GetExternalResources()[i]
+ b := metadata.GetExternalResources()[j]
if a.GetIndex() == b.GetIndex() {
return a.GetRetryAttempt() < b.GetRetryAttempt()
}
@@ -142,41 +142,41 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode
})
}
- reportedAt := input.Request.Event.ReportedAt
- if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) {
- reportedAt = input.Request.Event.OccurredAt
+ reportedAt := input.Request.GetEvent().GetReportedAt()
+ if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) {
+ reportedAt = input.Request.GetEvent().GetOccurredAt()
}
closure := &admin.TaskExecutionClosure{
- Phase: input.Request.Event.Phase,
+ Phase: input.Request.GetEvent().GetPhase(),
UpdatedAt: reportedAt,
- CreatedAt: input.Request.Event.OccurredAt,
- Logs: input.Request.Event.Logs,
- CustomInfo: input.Request.Event.CustomInfo,
- TaskType: input.Request.Event.TaskType,
+ CreatedAt: input.Request.GetEvent().GetOccurredAt(),
+ Logs: input.Request.GetEvent().GetLogs(),
+ CustomInfo: input.Request.GetEvent().GetCustomInfo(),
+ TaskType: input.Request.GetEvent().GetTaskType(),
Metadata: metadata,
- EventVersion: input.Request.Event.EventVersion,
+ EventVersion: input.Request.GetEvent().GetEventVersion(),
}
- if len(input.Request.Event.Reasons) > 0 {
- for _, reason := range input.Request.Event.Reasons {
- closure.Reasons = append(closure.Reasons, &admin.Reason{
- OccurredAt: reason.OccurredAt,
- Message: reason.Reason,
+ if len(input.Request.GetEvent().GetReasons()) > 0 {
+ for _, reason := range input.Request.GetEvent().GetReasons() {
+ closure.Reasons = append(closure.GetReasons(), &admin.Reason{
+ OccurredAt: reason.GetOccurredAt(),
+ Message: reason.GetReason(),
})
}
- closure.Reason = input.Request.Event.Reasons[len(input.Request.Event.Reasons)-1].Reason
- } else if len(input.Request.Event.Reason) > 0 {
+ closure.Reason = input.Request.GetEvent().GetReasons()[len(input.Request.GetEvent().GetReasons())-1].GetReason()
+ } else if len(input.Request.GetEvent().GetReason()) > 0 {
closure.Reasons = []*admin.Reason{
{
- OccurredAt: input.Request.Event.OccurredAt,
- Message: input.Request.Event.Reason,
+ OccurredAt: input.Request.GetEvent().GetOccurredAt(),
+ Message: input.Request.GetEvent().GetReason(),
},
}
- closure.Reason = input.Request.Event.Reason
+ closure.Reason = input.Request.GetEvent().GetReason()
}
- eventPhase := input.Request.Event.Phase
+ eventPhase := input.Request.GetEvent().GetPhase()
// Different tasks may report different phases as their first event.
// If the first event we receive for this execution is a valid
@@ -188,7 +188,7 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode
}
}
- if common.IsTaskExecutionTerminal(input.Request.Event.Phase) {
+ if common.IsTaskExecutionTerminal(input.Request.GetEvent().GetPhase()) {
err := addTaskTerminalState(ctx, input.Request, taskExecution, closure, input.InlineEventDataPolicy, input.StorageClient)
if err != nil {
return nil, err
@@ -201,7 +201,7 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode
}
taskExecution.Closure = marshaledClosure
- taskExecutionCreatedAt, err := ptypes.Timestamp(input.Request.Event.OccurredAt)
+ taskExecutionCreatedAt, err := ptypes.Timestamp(input.Request.GetEvent().GetOccurredAt())
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event timestamp")
}
@@ -232,17 +232,17 @@ func mergeLogs(existing, latest []*core.TaskLog) []*core.TaskLog {
latestSetByURI := make(map[string]*core.TaskLog, len(latest))
latestSetByName := make(map[string]*core.TaskLog, len(latest))
for _, latestLog := range latest {
- latestSetByURI[latestLog.Uri] = latestLog
- if len(latestLog.Name) > 0 {
- latestSetByName[latestLog.Name] = latestLog
+ latestSetByURI[latestLog.GetUri()] = latestLog
+ if len(latestLog.GetName()) > 0 {
+ latestSetByName[latestLog.GetName()] = latestLog
}
}
// Copy over the latest logs since names will change for existing logs as a task transitions across phases.
logs := latest
for _, existingLog := range existing {
- if _, ok := latestSetByURI[existingLog.Uri]; !ok {
- if _, ok = latestSetByName[existingLog.Name]; !ok {
+ if _, ok := latestSetByURI[existingLog.GetUri()]; !ok {
+ if _, ok = latestSetByName[existingLog.GetName()]; !ok {
// We haven't seen this log before: add it to the output result list.
logs = append(logs, existingLog)
}
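The mergeLogs hunk above keeps every log from the latest event and carries over an existing log only when neither its URI nor its (non-empty) name already appears in the latest set. A rough, self-contained illustration of that rule, assuming simplified stand-in types:

package main

import "fmt"

type TaskLog struct {
	Uri  string
	Name string
}

// mergeLogs prefers the latest logs and appends existing entries only when
// they collide with nothing in the latest set by URI or name.
func mergeLogs(existing, latest []TaskLog) []TaskLog {
	byURI := make(map[string]struct{}, len(latest))
	byName := make(map[string]struct{}, len(latest))
	for _, l := range latest {
		byURI[l.Uri] = struct{}{}
		if l.Name != "" {
			byName[l.Name] = struct{}{}
		}
	}
	merged := latest
	for _, l := range existing {
		if _, ok := byURI[l.Uri]; ok {
			continue
		}
		if _, ok := byName[l.Name]; ok {
			continue
		}
		merged = append(merged, l)
	}
	return merged
}

func main() {
	existing := []TaskLog{{Uri: "s3://old", Name: "attempt-0"}}
	latest := []TaskLog{{Uri: "s3://new", Name: "attempt-0"}}
	fmt.Println(mergeLogs(existing, latest)) // old entry dropped: its name is already present
}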
@@ -299,16 +299,16 @@ func mergeExternalResource(existing, latest *event.ExternalResourceInfo) *event.
return existing
}
- if latest.ExternalId != "" && existing.ExternalId != latest.ExternalId {
- existing.ExternalId = latest.ExternalId
+ if latest.GetExternalId() != "" && existing.GetExternalId() != latest.GetExternalId() {
+ existing.ExternalId = latest.GetExternalId()
}
// note we are not updating existing.Index and existing.RetryAttempt because they are the
// search key for our ExternalResource pool.
- existing.Phase = latest.Phase
- if latest.CacheStatus != core.CatalogCacheStatus_CACHE_DISABLED && existing.CacheStatus != latest.CacheStatus {
- existing.CacheStatus = latest.CacheStatus
+ existing.Phase = latest.GetPhase()
+ if latest.GetCacheStatus() != core.CatalogCacheStatus_CACHE_DISABLED && existing.GetCacheStatus() != latest.GetCacheStatus() {
+ existing.CacheStatus = latest.GetCacheStatus()
}
- existing.Logs = mergeLogs(existing.Logs, latest.Logs)
+ existing.Logs = mergeLogs(existing.GetLogs(), latest.GetLogs())
return existing
}
@@ -357,16 +357,16 @@ func mergeMetadata(existing, latest *event.TaskExecutionMetadata) *event.TaskExe
return existing
}
- if latest.GeneratedName != "" && existing.GeneratedName != latest.GeneratedName {
- existing.GeneratedName = latest.GeneratedName
+ if latest.GetGeneratedName() != "" && existing.GetGeneratedName() != latest.GetGeneratedName() {
+ existing.GeneratedName = latest.GetGeneratedName()
}
- existing.ExternalResources = mergeExternalResources(existing.ExternalResources, latest.ExternalResources)
- existing.ResourcePoolInfo = latest.ResourcePoolInfo
- if latest.PluginIdentifier != "" && existing.PluginIdentifier != latest.PluginIdentifier {
- existing.PluginIdentifier = latest.PluginIdentifier
+ existing.ExternalResources = mergeExternalResources(existing.GetExternalResources(), latest.GetExternalResources())
+ existing.ResourcePoolInfo = latest.GetResourcePoolInfo()
+ if latest.GetPluginIdentifier() != "" && existing.GetPluginIdentifier() != latest.GetPluginIdentifier() {
+ existing.PluginIdentifier = latest.GetPluginIdentifier()
}
- if latest.InstanceClass != event.TaskExecutionMetadata_DEFAULT && existing.InstanceClass != latest.InstanceClass {
- existing.InstanceClass = latest.InstanceClass
+ if latest.GetInstanceClass() != event.TaskExecutionMetadata_DEFAULT && existing.GetInstanceClass() != latest.GetInstanceClass() {
+ existing.InstanceClass = latest.GetInstanceClass()
}
return existing
@@ -374,7 +374,7 @@ func mergeMetadata(existing, latest *event.TaskExecutionMetadata) *event.TaskExe
func filterExternalResourceLogsByPhase(externalResources []*event.ExternalResourceInfo, phase core.TaskExecution_Phase) {
for _, externalResource := range externalResources {
- externalResource.Logs = filterLogsByPhase(externalResource.Logs, phase)
+ externalResource.Logs = filterLogsByPhase(externalResource.GetLogs(), phase)
}
}
@@ -382,13 +382,13 @@ func filterLogsByPhase(logs []*core.TaskLog, phase core.TaskExecution_Phase) []*
filteredLogs := make([]*core.TaskLog, 0, len(logs))
for _, l := range logs {
- if common.IsTaskExecutionTerminal(phase) && l.HideOnceFinished {
+ if common.IsTaskExecutionTerminal(phase) && l.GetHideOnceFinished() {
continue
}
// Some plugins like e.g. Dask, Ray start with or very quickly transition to core.TaskExecution_INITIALIZING
// once the CR has been created even though the underlying pods are still pending. We thus treat queued and
// initializing the same here.
- if (phase == core.TaskExecution_QUEUED || phase == core.TaskExecution_INITIALIZING) && !l.ShowWhilePending {
+ if (phase == core.TaskExecution_QUEUED || phase == core.TaskExecution_INITIALIZING) && !l.GetShowWhilePending() {
continue
}
filteredLogs = append(filteredLogs, l)
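filterLogsByPhase above encodes two display rules: links flagged HideOnceFinished are dropped once the phase is terminal, and links not marked ShowWhilePending are hidden while the task is still queued or initializing, since the underlying pods may not exist yet. A condensed, hypothetical version with stand-in types:

package main

import "fmt"

type Phase int

const (
	Queued Phase = iota
	Initializing
	Running
	Succeeded
)

type TaskLog struct {
	Uri              string
	HideOnceFinished bool
	ShowWhilePending bool
}

func isTerminal(p Phase) bool { return p == Succeeded }

func filterLogsByPhase(logs []TaskLog, phase Phase) []TaskLog {
	filtered := make([]TaskLog, 0, len(logs))
	for _, l := range logs {
		if isTerminal(phase) && l.HideOnceFinished {
			continue // link is only meaningful while the task runs
		}
		if (phase == Queued || phase == Initializing) && !l.ShowWhilePending {
			continue // pods may not exist yet, so hide the link for now
		}
		filtered = append(filtered, l)
	}
	return filtered
}

func main() {
	logs := []TaskLog{
		{Uri: "uri-default"},
		{Uri: "uri-show-pending", ShowWhilePending: true},
		{Uri: "uri-hide-finished", HideOnceFinished: true},
	}
	fmt.Println(filterLogsByPhase(logs, Queued))    // only uri-show-pending survives
	fmt.Println(filterLogsByPhase(logs, Succeeded)) // uri-hide-finished is dropped
}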
@@ -409,45 +409,45 @@ func UpdateTaskExecutionModel(ctx context.Context, request *admin.TaskExecutionE
return errors.NewFlyteAdminErrorf(codes.Internal,
"failed to unmarshal task execution closure with error: %+v", err)
}
- isPhaseChange := taskExecutionModel.Phase != request.Event.Phase.String()
+ isPhaseChange := taskExecutionModel.Phase != request.GetEvent().GetPhase().String()
existingTaskPhase := taskExecutionModel.Phase
- taskExecutionModel.Phase = request.Event.Phase.String()
- taskExecutionModel.PhaseVersion = request.Event.PhaseVersion
- taskExecutionClosure.Phase = request.Event.Phase
- reportedAt := request.Event.ReportedAt
- if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) {
- reportedAt = request.Event.OccurredAt
+ taskExecutionModel.Phase = request.GetEvent().GetPhase().String()
+ taskExecutionModel.PhaseVersion = request.GetEvent().GetPhaseVersion()
+ taskExecutionClosure.Phase = request.GetEvent().GetPhase()
+ reportedAt := request.GetEvent().GetReportedAt()
+ if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) {
+ reportedAt = request.GetEvent().GetOccurredAt()
}
taskExecutionClosure.UpdatedAt = reportedAt
- mergedLogs := mergeLogs(taskExecutionClosure.Logs, request.Event.Logs)
- filteredLogs := filterLogsByPhase(mergedLogs, request.Event.Phase)
+ mergedLogs := mergeLogs(taskExecutionClosure.GetLogs(), request.GetEvent().GetLogs())
+ filteredLogs := filterLogsByPhase(mergedLogs, request.GetEvent().GetPhase())
taskExecutionClosure.Logs = filteredLogs
- if len(request.Event.Reasons) > 0 {
- for _, reason := range request.Event.Reasons {
+ if len(request.GetEvent().GetReasons()) > 0 {
+ for _, reason := range request.GetEvent().GetReasons() {
taskExecutionClosure.Reasons = append(
- taskExecutionClosure.Reasons,
+ taskExecutionClosure.GetReasons(),
&admin.Reason{
- OccurredAt: reason.OccurredAt,
- Message: reason.Reason,
+ OccurredAt: reason.GetOccurredAt(),
+ Message: reason.GetReason(),
})
}
- taskExecutionClosure.Reason = request.Event.Reasons[len(request.Event.Reasons)-1].Reason
- } else if len(request.Event.Reason) > 0 {
- if taskExecutionClosure.Reason != request.Event.Reason {
+ taskExecutionClosure.Reason = request.GetEvent().GetReasons()[len(request.GetEvent().GetReasons())-1].GetReason()
+ } else if len(request.GetEvent().GetReason()) > 0 {
+ if taskExecutionClosure.GetReason() != request.GetEvent().GetReason() {
// by tracking a time-series of reasons we increase the size of the TaskExecutionClosure in scenarios where
// a task reports a large number of unique reasons. if this size increase becomes problematic we this logic
// will need to be revisited.
taskExecutionClosure.Reasons = append(
- taskExecutionClosure.Reasons,
+ taskExecutionClosure.GetReasons(),
&admin.Reason{
- OccurredAt: request.Event.OccurredAt,
- Message: request.Event.Reason,
+ OccurredAt: request.GetEvent().GetOccurredAt(),
+ Message: request.GetEvent().GetReason(),
})
}
- taskExecutionClosure.Reason = request.Event.Reason
+ taskExecutionClosure.Reason = request.GetEvent().GetReason()
}
if existingTaskPhase != core.TaskExecution_RUNNING.String() && taskExecutionModel.Phase == core.TaskExecution_RUNNING.String() {
err = addTaskStartedState(request, taskExecutionModel, &taskExecutionClosure)
@@ -456,24 +456,24 @@ func UpdateTaskExecutionModel(ctx context.Context, request *admin.TaskExecutionE
}
}
- if common.IsTaskExecutionTerminal(request.Event.Phase) {
+ if common.IsTaskExecutionTerminal(request.GetEvent().GetPhase()) {
err := addTaskTerminalState(ctx, request, taskExecutionModel, &taskExecutionClosure, inlineEventDataPolicy, storageClient)
if err != nil {
return err
}
}
- taskExecutionClosure.CustomInfo, err = mergeCustom(taskExecutionClosure.CustomInfo, request.Event.CustomInfo)
+ taskExecutionClosure.CustomInfo, err = mergeCustom(taskExecutionClosure.GetCustomInfo(), request.GetEvent().GetCustomInfo())
if err != nil {
return errors.NewFlyteAdminErrorf(codes.Internal, "failed to merge task event custom_info with error: %v", err)
}
- taskExecutionClosure.Metadata = mergeMetadata(taskExecutionClosure.Metadata, request.Event.Metadata)
+ taskExecutionClosure.Metadata = mergeMetadata(taskExecutionClosure.GetMetadata(), request.GetEvent().GetMetadata())
- if isPhaseChange && taskExecutionClosure.Metadata != nil && len(taskExecutionClosure.Metadata.ExternalResources) > 0 {
- filterExternalResourceLogsByPhase(taskExecutionClosure.Metadata.ExternalResources, request.Event.Phase)
+ if isPhaseChange && taskExecutionClosure.GetMetadata() != nil && len(taskExecutionClosure.GetMetadata().GetExternalResources()) > 0 {
+ filterExternalResourceLogsByPhase(taskExecutionClosure.GetMetadata().GetExternalResources(), request.GetEvent().GetPhase())
}
- if request.Event.EventVersion > taskExecutionClosure.EventVersion {
- taskExecutionClosure.EventVersion = request.Event.EventVersion
+ if request.GetEvent().GetEventVersion() > taskExecutionClosure.GetEventVersion() {
+ taskExecutionClosure.EventVersion = request.GetEvent().GetEventVersion()
}
marshaledClosure, err := proto.Marshal(&taskExecutionClosure)
if err != nil {
@@ -495,7 +495,7 @@ func FromTaskExecutionModel(taskExecutionModel models.TaskExecution, opts *Execu
if err != nil {
return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure")
}
- if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 {
+ if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().GetMessage()) > 0 {
trimmedErrOutputResult := closure.GetError()
trimmedErrMessage := TrimErrorMessage(trimmedErrOutputResult.GetMessage())
trimmedErrOutputResult.Message = trimmedErrMessage
@@ -551,15 +551,15 @@ func handleTaskExecutionInputs(ctx context.Context, taskExecutionModel *models.T
// Inputs are static over the duration of the task execution, no need to update them when they're already set
return nil
}
- switch request.Event.GetInputValue().(type) {
+ switch request.GetEvent().GetInputValue().(type) {
case *event.TaskExecutionEvent_InputUri:
taskExecutionModel.InputURI = request.GetEvent().GetInputUri()
case *event.TaskExecutionEvent_InputData:
uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetInputData(),
- request.Event.ParentNodeExecutionId.ExecutionId.Project, request.Event.ParentNodeExecutionId.ExecutionId.Domain,
- request.Event.ParentNodeExecutionId.ExecutionId.Name, request.Event.ParentNodeExecutionId.NodeId,
- request.Event.TaskId.Project, request.Event.TaskId.Domain, request.Event.TaskId.Name, request.Event.TaskId.Version,
- strconv.FormatUint(uint64(request.Event.RetryAttempt), 10), InputsObjectSuffix)
+ request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetProject(), request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetDomain(),
+ request.GetEvent().GetParentNodeExecutionId().GetExecutionId().GetName(), request.GetEvent().GetParentNodeExecutionId().GetNodeId(),
+ request.GetEvent().GetTaskId().GetProject(), request.GetEvent().GetTaskId().GetDomain(), request.GetEvent().GetTaskId().GetName(), request.GetEvent().GetTaskId().GetVersion(),
+ strconv.FormatUint(uint64(request.GetEvent().GetRetryAttempt()), 10), InputsObjectSuffix)
if err != nil {
return err
}
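Both the create and update paths above fall back from ReportedAt to OccurredAt when the event carries no usable reported timestamp (nil or the zero value), and the resolved time becomes the closure's UpdatedAt. A small sketch of that fallback, assuming the modern timestamppb API rather than the legacy ptypes helpers used in this file:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

// resolveUpdatedAt prefers reportedAt but treats nil and the zero timestamp
// as "not reported", falling back to occurredAt.
func resolveUpdatedAt(reportedAt, occurredAt *timestamppb.Timestamp) *timestamppb.Timestamp {
	if reportedAt == nil || (reportedAt.GetSeconds() == 0 && reportedAt.GetNanos() == 0) {
		return occurredAt
	}
	return reportedAt
}

func main() {
	occurred := timestamppb.New(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC))
	fmt.Println(resolveUpdatedAt(nil, occurred).AsTime())                      // falls back
	fmt.Println(resolveUpdatedAt(&timestamppb.Timestamp{}, occurred).AsTime()) // zero value also falls back
}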
diff --git a/flyteadmin/pkg/repositories/transformers/task_execution_test.go b/flyteadmin/pkg/repositories/transformers/task_execution_test.go
index 5fc5430192..461ab39c8c 100644
--- a/flyteadmin/pkg/repositories/transformers/task_execution_test.go
+++ b/flyteadmin/pkg/repositories/transformers/task_execution_test.go
@@ -85,7 +85,7 @@ func TestAddTaskStartedState(t *testing.T) {
err := addTaskStartedState(&request, &taskExecutionModel, closure)
assert.Nil(t, err)
- timestamp, err := ptypes.Timestamp(closure.StartedAt)
+ timestamp, err := ptypes.Timestamp(closure.GetStartedAt())
assert.Nil(t, err)
assert.Equal(t, startedAt, timestamp)
assert.Equal(t, &startedAt, taskExecutionModel.StartedAt)
@@ -109,7 +109,7 @@ func TestAddTaskStartedState(t *testing.T) {
err := addTaskStartedState(&request, &taskExecutionModel, closure)
assert.Nil(t, err)
- timestamp, err := ptypes.Timestamp(closure.StartedAt)
+ timestamp, err := ptypes.Timestamp(closure.GetStartedAt())
assert.Nil(t, err)
assert.NotEqual(t, oldStartedAt, timestamp)
assert.Equal(t, &oldStartedAt, taskExecutionModel.StartedAt)
@@ -169,7 +169,7 @@ func TestAddTaskTerminalState_OutputURI(t *testing.T) {
duration, err := ptypes.Duration(closure.GetDuration())
assert.Nil(t, err)
- assert.EqualValues(t, request.Event.OutputResult, closure.OutputResult)
+ assert.EqualValues(t, request.GetEvent().GetOutputResult(), closure.GetOutputResult())
assert.EqualValues(t, outputURI, closure.GetOutputUri())
assert.EqualValues(t, time.Minute, duration)
@@ -232,7 +232,7 @@ func TestAddTaskTerminalState_OutputData(t *testing.T) {
duration, err := ptypes.Duration(closure.GetDuration())
assert.Nil(t, err)
- assert.EqualValues(t, request.Event.OutputResult, closure.OutputResult)
+ assert.EqualValues(t, request.GetEvent().GetOutputResult(), closure.GetOutputResult())
assert.True(t, proto.Equal(outputData, closure.GetOutputData()))
assert.EqualValues(t, time.Minute, duration)
})
@@ -296,17 +296,17 @@ func TestCreateTaskExecutionModelQueued(t *testing.T) {
assert.Equal(t, &models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -370,7 +370,7 @@ func TestCreateTaskExecutionModelRunning(t *testing.T) {
CustomInfo: &customInfo,
}
- t.Logf("expected %+v %+v\n", expectedClosure.Reason, expectedClosure.Reasons)
+ t.Logf("expected %+v %+v\n", expectedClosure.GetReason(), expectedClosure.GetReasons())
expectedClosureBytes, err := proto.Marshal(expectedClosure)
assert.Nil(t, err)
@@ -378,17 +378,17 @@ func TestCreateTaskExecutionModelRunning(t *testing.T) {
assert.Equal(t, &models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -522,17 +522,17 @@ func TestUpdateTaskExecutionModelRunningToFailed(t *testing.T) {
existingTaskExecution := models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -627,17 +627,17 @@ func TestUpdateTaskExecutionModelRunningToFailed(t *testing.T) {
assert.EqualValues(t, models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -675,17 +675,17 @@ func TestUpdateTaskExecutionModelFilterLogLinks(t *testing.T) {
existingTaskExecution := models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -729,7 +729,7 @@ func TestUpdateTaskExecutionModelFilterLogLinks(t *testing.T) {
err = proto.Unmarshal(existingTaskExecution.Closure, updatedClosure)
assert.Nil(t, err)
- assert.Equal(t, updatedClosure.Logs, []*core.TaskLog{
+ assert.Equal(t, updatedClosure.GetLogs(), []*core.TaskLog{
{
Uri: "uri-show-pending",
ShowWhilePending: true,
@@ -776,17 +776,17 @@ func TestUpdateTaskExecutionModelFilterLogLinksArray(t *testing.T) {
existingTaskExecution := models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -821,7 +821,7 @@ func TestUpdateTaskExecutionModelFilterLogLinksArray(t *testing.T) {
err = proto.Unmarshal(existingTaskExecution.Closure, updatedClosure)
assert.Nil(t, err)
- assert.Equal(t, updatedClosure.Metadata.ExternalResources[0].Logs, []*core.TaskLog{
+ assert.Equal(t, updatedClosure.GetMetadata().GetExternalResources()[0].GetLogs(), []*core.TaskLog{
{
Uri: "uri-default",
},
@@ -851,17 +851,17 @@ func TestUpdateTaskExecutionModelSingleEvents(t *testing.T) {
existingTaskExecution := models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -943,17 +943,17 @@ func TestUpdateTaskExecutionModelBatchedEvents(t *testing.T) {
existingTaskExecution := models.TaskExecution{
TaskExecutionKey: models.TaskExecutionKey{
TaskKey: models.TaskKey{
- Project: sampleTaskID.Project,
- Domain: sampleTaskID.Domain,
- Name: sampleTaskID.Name,
- Version: sampleTaskID.Version,
+ Project: sampleTaskID.GetProject(),
+ Domain: sampleTaskID.GetDomain(),
+ Name: sampleTaskID.GetName(),
+ Version: sampleTaskID.GetVersion(),
},
NodeExecutionKey: models.NodeExecutionKey{
- NodeID: sampleNodeExecID.NodeId,
+ NodeID: sampleNodeExecID.GetNodeId(),
ExecutionKey: models.ExecutionKey{
- Project: sampleNodeExecID.ExecutionId.Project,
- Domain: sampleNodeExecID.ExecutionId.Domain,
- Name: sampleNodeExecID.ExecutionId.Name,
+ Project: sampleNodeExecID.GetExecutionId().GetProject(),
+ Domain: sampleNodeExecID.GetExecutionId().GetDomain(),
+ Name: sampleNodeExecID.GetExecutionId().GetName(),
},
},
RetryAttempt: &retryAttemptValue,
@@ -1130,7 +1130,7 @@ func TestFromTaskExecutionModel_Error(t *testing.T) {
expectedExecErr := execErr
expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen))
assert.Nil(t, err)
- assert.True(t, proto.Equal(expectedExecErr, taskExecution.Closure.GetError()))
+ assert.True(t, proto.Equal(expectedExecErr, taskExecution.GetClosure().GetError()))
extraShortErrMsg := string(make([]byte, 10))
execErr = &core.ExecutionError{
@@ -1149,7 +1149,7 @@ func TestFromTaskExecutionModel_Error(t *testing.T) {
expectedExecErr = execErr
expectedExecErr.Message = string(make([]byte, 10))
assert.Nil(t, err)
- assert.True(t, proto.Equal(expectedExecErr, taskExecution.Closure.GetError()))
+ assert.True(t, proto.Equal(expectedExecErr, taskExecution.GetClosure().GetError()))
}
func TestFromTaskExecutionModels(t *testing.T) {
diff --git a/flyteadmin/pkg/repositories/transformers/task_test.go b/flyteadmin/pkg/repositories/transformers/task_test.go
index 1f01f4b720..8fac3863d7 100644
--- a/flyteadmin/pkg/repositories/transformers/task_test.go
+++ b/flyteadmin/pkg/repositories/transformers/task_test.go
@@ -52,10 +52,10 @@ func TestFromTaskModel(t *testing.T) {
Domain: "domain",
Name: "name",
Version: "version",
- }, task.Id))
+ }, task.GetId()))
expectedClosure := testutils.GetTaskClosure()
expectedClosure.CreatedAt = createdAtProto
- assert.True(t, proto.Equal(expectedClosure, task.Closure))
+ assert.True(t, proto.Equal(expectedClosure, task.GetClosure()))
}
func TestFromTaskModels(t *testing.T) {
@@ -100,10 +100,10 @@ func TestFromTaskModels(t *testing.T) {
Domain: "domain a",
Name: "name a",
Version: "version a",
- }, taskList[0].Id))
+ }, taskList[0].GetId()))
expectedClosure := testutils.GetTaskClosure()
expectedClosure.CreatedAt = createdAtAProto
- assert.True(t, proto.Equal(expectedClosure, taskList[0].Closure))
+ assert.True(t, proto.Equal(expectedClosure, taskList[0].GetClosure()))
assert.True(t, proto.Equal(&core.Identifier{
ResourceType: core.ResourceType_TASK,
@@ -111,11 +111,11 @@ func TestFromTaskModels(t *testing.T) {
Domain: "domain b",
Name: "name b",
Version: "version b",
- }, taskList[1].Id))
+ }, taskList[1].GetId()))
expectedClosure = &admin.TaskClosure{
CreatedAt: createdAtBProto,
}
- assert.True(t, proto.Equal(expectedClosure, taskList[1].Closure))
+ assert.True(t, proto.Equal(expectedClosure, taskList[1].GetClosure()))
}
func TestFromTaskModelsToIdentifiers(t *testing.T) {
@@ -139,10 +139,10 @@ func TestFromTaskModelsToIdentifiers(t *testing.T) {
}
taskIds := FromTaskModelsToIdentifiers(taskModels)
- assert.Equal(t, "domain a", taskIds[0].Domain)
- assert.Equal(t, "project a", taskIds[0].Project)
- assert.Equal(t, "name a", taskIds[0].Name)
- assert.Equal(t, "domain b", taskIds[1].Domain)
- assert.Equal(t, "project b", taskIds[1].Project)
- assert.Equal(t, "name b", taskIds[1].Name)
+ assert.Equal(t, "domain a", taskIds[0].GetDomain())
+ assert.Equal(t, "project a", taskIds[0].GetProject())
+ assert.Equal(t, "name a", taskIds[0].GetName())
+ assert.Equal(t, "domain b", taskIds[1].GetDomain())
+ assert.Equal(t, "project b", taskIds[1].GetProject())
+ assert.Equal(t, "name b", taskIds[1].GetName())
}
diff --git a/flyteadmin/pkg/repositories/transformers/workflow.go b/flyteadmin/pkg/repositories/transformers/workflow.go
index a796987955..0dbf7196a8 100644
--- a/flyteadmin/pkg/repositories/transformers/workflow.go
+++ b/flyteadmin/pkg/repositories/transformers/workflow.go
@@ -17,8 +17,8 @@ import (
func CreateWorkflowModel(request *admin.WorkflowCreateRequest, remoteClosureIdentifier string,
digest []byte) (models.Workflow, error) {
var typedInterface []byte
- if request.Spec != nil && request.Spec.Template != nil && request.Spec.Template.Interface != nil {
- serializedTypedInterface, err := proto.Marshal(request.Spec.Template.Interface)
+ if request.GetSpec() != nil && request.GetSpec().GetTemplate() != nil && request.GetSpec().GetTemplate().GetInterface() != nil {
+ serializedTypedInterface, err := proto.Marshal(request.GetSpec().GetTemplate().GetInterface())
if err != nil {
return models.Workflow{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize workflow spec")
}
@@ -26,10 +26,10 @@ func CreateWorkflowModel(request *admin.WorkflowCreateRequest, remoteClosureIden
}
return models.Workflow{
WorkflowKey: models.WorkflowKey{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Id.Name,
- Version: request.Id.Version,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetId().GetName(),
+ Version: request.GetId().GetVersion(),
},
TypedInterface: typedInterface,
RemoteClosureIdentifier: remoteClosureIdentifier,
@@ -54,7 +54,7 @@ func FromWorkflowModel(workflowModel models.Workflow) (admin.Workflow, error) {
if len(workflowModel.TypedInterface) > 0 {
err = proto.Unmarshal(workflowModel.TypedInterface, &workflowInterface)
if err != nil {
- return admin.Workflow{}, errors.NewFlyteAdminErrorf(codes.Internal, fmt.Sprintf("failed to unmarshal workflow %v interface. Error message: %v", workflowModel.ID, err.Error()))
+ return admin.Workflow{}, errors.NewFlyteAdminErrorf(codes.Internal, fmt.Sprintf("failed to unmarshal workflow %v interface. Error message: %v", workflowModel.ID, err.Error())) //nolint
}
}
diff --git a/flyteadmin/pkg/repositories/transformers/workflow_test.go b/flyteadmin/pkg/repositories/transformers/workflow_test.go
index 0f29aaa64e..95a698075d 100644
--- a/flyteadmin/pkg/repositories/transformers/workflow_test.go
+++ b/flyteadmin/pkg/repositories/transformers/workflow_test.go
@@ -70,7 +70,7 @@ func TestFromWorkflowModel(t *testing.T) {
Domain: "domain",
Name: "name",
Version: "version",
- }, workflow.Id))
+ }, workflow.GetId()))
var workflowInterface core.TypedInterface
err = proto.Unmarshal(workflowModel.TypedInterface, &workflowInterface)
@@ -85,7 +85,7 @@ func TestFromWorkflowModel(t *testing.T) {
},
},
},
- }, workflow.Closure))
+ }, workflow.GetClosure()))
}
func TestFromWorkflowModels(t *testing.T) {
@@ -131,7 +131,7 @@ func TestFromWorkflowModels(t *testing.T) {
Domain: "domain a",
Name: "name a",
Version: "version a",
- }, workflowList[0].Id))
+ }, workflowList[0].GetId()))
workflowInterface := testutils.GetWorkflowRequestInterface()
assert.NoError(t, err)
@@ -145,7 +145,7 @@ func TestFromWorkflowModels(t *testing.T) {
},
},
},
- }, workflowList[0].Closure))
+ }, workflowList[0].GetClosure()))
assert.True(t, proto.Equal(&core.Identifier{
ResourceType: core.ResourceType_WORKFLOW,
@@ -153,7 +153,7 @@ func TestFromWorkflowModels(t *testing.T) {
Domain: "domain b",
Name: "name b",
Version: "version b",
- }, workflowList[1].Id))
+ }, workflowList[1].GetId()))
assert.True(t, proto.Equal(&admin.WorkflowClosure{
CreatedAt: createdAtBProto,
@@ -164,5 +164,5 @@ func TestFromWorkflowModels(t *testing.T) {
},
},
},
- }, workflowList[1].Closure))
+ }, workflowList[1].GetClosure()))
}
diff --git a/flyteadmin/pkg/rpc/adminservice/description_entity.go b/flyteadmin/pkg/rpc/adminservice/description_entity.go
index 91e3f0f134..b8bb5f1d06 100644
--- a/flyteadmin/pkg/rpc/adminservice/description_entity.go
+++ b/flyteadmin/pkg/rpc/adminservice/description_entity.go
@@ -12,7 +12,7 @@ import (
func (m *AdminService) GetDescriptionEntity(ctx context.Context, request *admin.ObjectGetRequest) (*admin.DescriptionEntity, error) {
// NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it
// to the request.
- if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED {
+ if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED {
logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request)
request.Id.ResourceType = core.ResourceType_TASK
}
diff --git a/flyteadmin/pkg/rpc/adminservice/launch_plan.go b/flyteadmin/pkg/rpc/adminservice/launch_plan.go
index f0dabdb76c..1cdf757362 100644
--- a/flyteadmin/pkg/rpc/adminservice/launch_plan.go
+++ b/flyteadmin/pkg/rpc/adminservice/launch_plan.go
@@ -26,7 +26,7 @@ func (m *AdminService) CreateLaunchPlan(
func (m *AdminService) GetLaunchPlan(ctx context.Context, request *admin.ObjectGetRequest) (*admin.LaunchPlan, error) {
// NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it
// to the request.
- if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED {
+ if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED {
logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request)
request.Id.ResourceType = core.ResourceType_LAUNCH_PLAN
}
@@ -60,7 +60,7 @@ func (m *AdminService) UpdateLaunchPlan(ctx context.Context, request *admin.Laun
*admin.LaunchPlanUpdateResponse, error) {
// NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it
// to the request.
- if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED {
+ if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED {
logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request)
request.Id.ResourceType = core.ResourceType_LAUNCH_PLAN
}
diff --git a/flyteadmin/pkg/rpc/adminservice/node_execution.go b/flyteadmin/pkg/rpc/adminservice/node_execution.go
index e8965edd1d..3f6383a4e4 100644
--- a/flyteadmin/pkg/rpc/adminservice/node_execution.go
+++ b/flyteadmin/pkg/rpc/adminservice/node_execution.go
@@ -68,8 +68,8 @@ func (m *AdminService) ListNodeExecutionsForTask(
ctx context.Context, request *admin.NodeExecutionForTaskListRequest) (*admin.NodeExecutionList, error) {
// NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it
// to the request.
- if request.TaskExecutionId != nil && request.TaskExecutionId.TaskId != nil &&
- request.TaskExecutionId.TaskId.ResourceType == core.ResourceType_UNSPECIFIED {
+ if request.GetTaskExecutionId() != nil && request.GetTaskExecutionId().GetTaskId() != nil &&
+ request.GetTaskExecutionId().GetTaskId().GetResourceType() == core.ResourceType_UNSPECIFIED {
logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request)
request.TaskExecutionId.TaskId.ResourceType = core.ResourceType_TASK
}
diff --git a/flyteadmin/pkg/rpc/adminservice/task.go b/flyteadmin/pkg/rpc/adminservice/task.go
index 50ed9f8eba..9d4e2883b3 100644
--- a/flyteadmin/pkg/rpc/adminservice/task.go
+++ b/flyteadmin/pkg/rpc/adminservice/task.go
@@ -27,7 +27,7 @@ func (m *AdminService) CreateTask(
func (m *AdminService) GetTask(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Task, error) {
// NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it
// to the request.
- if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED {
+ if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED {
logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request)
request.Id.ResourceType = core.ResourceType_TASK
}
diff --git a/flyteadmin/pkg/rpc/adminservice/task_execution.go b/flyteadmin/pkg/rpc/adminservice/task_execution.go
index d0d8a99d56..3b98fe5057 100644
--- a/flyteadmin/pkg/rpc/adminservice/task_execution.go
+++ b/flyteadmin/pkg/rpc/adminservice/task_execution.go
@@ -28,11 +28,11 @@ func (m *AdminService) GetTaskExecution(
ctx context.Context, request *admin.TaskExecutionGetRequest) (*admin.TaskExecution, error) {
// NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it
// to the request.
- if request.Id != nil && request.Id.TaskId != nil && request.Id.TaskId.ResourceType == core.ResourceType_UNSPECIFIED {
+ if request.GetId() != nil && request.GetId().GetTaskId() != nil && request.GetId().GetTaskId().GetResourceType() == core.ResourceType_UNSPECIFIED {
logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request)
request.Id.TaskId.ResourceType = core.ResourceType_TASK
}
- if err := validation.ValidateTaskExecutionIdentifier(request.Id); err != nil {
+ if err := validation.ValidateTaskExecutionIdentifier(request.GetId()); err != nil {
return nil, err
}
@@ -70,7 +70,7 @@ func (m *AdminService) GetTaskExecutionData(
ctx context.Context, request *admin.TaskExecutionGetDataRequest) (*admin.TaskExecutionGetDataResponse, error) {
// NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it
// to the request.
- if request.Id != nil && request.Id.TaskId != nil && request.Id.TaskId.ResourceType == core.ResourceType_UNSPECIFIED {
+ if request.GetId() != nil && request.GetId().GetTaskId() != nil && request.GetId().GetTaskId().GetResourceType() == core.ResourceType_UNSPECIFIED {
logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request)
request.Id.TaskId.ResourceType = core.ResourceType_TASK
}
diff --git a/flyteadmin/pkg/rpc/adminservice/tests/execution_test.go b/flyteadmin/pkg/rpc/adminservice/tests/execution_test.go
index f541eea806..ef73e60eaa 100644
--- a/flyteadmin/pkg/rpc/adminservice/tests/execution_test.go
+++ b/flyteadmin/pkg/rpc/adminservice/tests/execution_test.go
@@ -36,9 +36,9 @@ func TestCreateExecutionHappyCase(t *testing.T) {
request *admin.ExecutionCreateRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) {
return &admin.ExecutionCreateResponse{
Id: &core.WorkflowExecutionIdentifier{
- Project: request.Project,
- Domain: request.Domain,
- Name: request.Name,
+ Project: request.GetProject(),
+ Domain: request.GetDomain(),
+ Name: request.GetName(),
},
}, nil
},
@@ -52,7 +52,7 @@ func TestCreateExecutionHappyCase(t *testing.T) {
Domain: "Domain",
Project: "Project",
})
- assert.True(t, proto.Equal(&workflowExecutionIdentifier, resp.Id))
+ assert.True(t, proto.Equal(&workflowExecutionIdentifier, resp.GetId()))
assert.NoError(t, err)
}
@@ -64,9 +64,9 @@ func TestCreateExecutionError(t *testing.T) {
func(ctx context.Context,
request *admin.ExecutionCreateRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) {
return nil, repoErrors.GetMissingEntityError("execution", &core.Identifier{
- Project: request.Project,
- Domain: request.Domain,
- Name: request.Name,
+ Project: request.GetProject(),
+ Domain: request.GetDomain(),
+ Name: request.GetName(),
})
},
)
@@ -93,9 +93,9 @@ func TestRelaunchExecutionHappyCase(t *testing.T) {
request *admin.ExecutionRelaunchRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) {
return &admin.ExecutionCreateResponse{
Id: &core.WorkflowExecutionIdentifier{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Name,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetName(),
},
}, nil
},
@@ -111,9 +111,9 @@ func TestRelaunchExecutionHappyCase(t *testing.T) {
},
Name: "name",
})
- assert.Equal(t, "project", resp.Id.Project)
- assert.Equal(t, "domain", resp.Id.Domain)
- assert.Equal(t, "name", resp.Id.Name)
+ assert.Equal(t, "project", resp.GetId().GetProject())
+ assert.Equal(t, "domain", resp.GetId().GetDomain())
+ assert.Equal(t, "name", resp.GetId().GetName())
assert.NoError(t, err)
}
@@ -124,7 +124,7 @@ func TestRelaunchExecutionError(t *testing.T) {
mockExecutionManager.SetRelaunchCallback(
func(ctx context.Context,
request *admin.ExecutionRelaunchRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) {
- return nil, repoErrors.GetMissingEntityError("execution", request.Id)
+ return nil, repoErrors.GetMissingEntityError("execution", request.GetId())
},
)
mockServer := NewMockAdminServer(NewMockAdminServerInput{
@@ -148,9 +148,9 @@ func TestRecoverExecutionHappyCase(t *testing.T) {
request *admin.ExecutionRecoverRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) {
return &admin.ExecutionCreateResponse{
Id: &core.WorkflowExecutionIdentifier{
- Project: request.Id.Project,
- Domain: request.Id.Domain,
- Name: request.Name,
+ Project: request.GetId().GetProject(),
+ Domain: request.GetId().GetDomain(),
+ Name: request.GetName(),
},
}, nil
}
@@ -166,9 +166,9 @@ func TestRecoverExecutionHappyCase(t *testing.T) {
},
Name: "name",
})
- assert.Equal(t, "project", resp.Id.Project)
- assert.Equal(t, "domain", resp.Id.Domain)
- assert.Equal(t, "name", resp.Id.Name)
+ assert.Equal(t, "project", resp.GetId().GetProject())
+ assert.Equal(t, "domain", resp.GetId().GetDomain())
+ assert.Equal(t, "name", resp.GetId().GetName())
assert.NoError(t, err)
}
@@ -179,7 +179,7 @@ func TestRecoverExecutionError(t *testing.T) {
mockExecutionManager.RecoverExecutionFunc =
func(ctx context.Context,
request *admin.ExecutionRecoverRequest, requestedAt time.Time) (*admin.ExecutionCreateResponse, error) {
- return nil, repoErrors.GetMissingEntityError("execution", request.Id)
+ return nil, repoErrors.GetMissingEntityError("execution", request.GetId())
}
mockServer := NewMockAdminServer(NewMockAdminServerInput{
executionManager: &mockExecutionManager,
@@ -199,10 +199,10 @@ func TestCreateWorkflowEvent(t *testing.T) {
mockExecutionManager.SetCreateEventCallback(
func(ctx context.Context, request *admin.WorkflowExecutionEventRequest) (
*admin.WorkflowExecutionEventResponse, error) {
- assert.Equal(t, requestID, request.RequestId)
- assert.NotNil(t, request.Event)
- assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.Event.ExecutionId))
- assert.Equal(t, phase, request.Event.Phase)
+ assert.Equal(t, requestID, request.GetRequestId())
+ assert.NotNil(t, request.GetEvent())
+ assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.GetEvent().GetExecutionId()))
+ assert.Equal(t, phase, request.GetEvent().GetPhase())
return &admin.WorkflowExecutionEventResponse{}, nil
})
mockServer := NewMockAdminServer(NewMockAdminServerInput{
@@ -248,7 +248,7 @@ func TestGetExecution(t *testing.T) {
mockExecutionManager.SetGetCallback(
func(ctx context.Context,
request *admin.WorkflowExecutionGetRequest) (*admin.Execution, error) {
- assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.Id))
+ assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.GetId()))
return response, nil
},
)
@@ -288,7 +288,7 @@ func TestUpdateExecution(t *testing.T) {
mockExecutionManager.SetUpdateExecutionCallback(
func(ctx context.Context,
request *admin.ExecutionUpdateRequest, requestedAt time.Time) (*admin.ExecutionUpdateResponse, error) {
- assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.Id))
+ assert.True(t, proto.Equal(&workflowExecutionIdentifier, request.GetId()))
return response, nil
},
)
@@ -326,9 +326,9 @@ func TestListExecutions(t *testing.T) {
mockExecutionManager := mocks.MockExecutionManager{}
mockExecutionManager.SetListCallback(func(ctx context.Context, request *admin.ResourceListRequest) (
*admin.ExecutionList, error) {
- assert.Equal(t, "project", request.Id.Project)
- assert.Equal(t, "domain", request.Id.Domain)
- assert.Equal(t, uint32(1), request.Limit)
+ assert.Equal(t, "project", request.GetId().GetProject())
+ assert.Equal(t, "domain", request.GetId().GetDomain())
+ assert.Equal(t, uint32(1), request.GetLimit())
return &admin.ExecutionList{
Executions: []*admin.Execution{
{
@@ -350,7 +350,7 @@ func TestListExecutions(t *testing.T) {
Limit: 1,
})
assert.NoError(t, err)
- assert.Len(t, response.Executions, 1)
+ assert.Len(t, response.GetExecutions(), 1)
}
func TestListExecutionsError(t *testing.T) {
@@ -386,8 +386,8 @@ func TestTerminateExecution(t *testing.T) {
abortCause := "abort cause"
mockExecutionManager.SetTerminateExecutionCallback(func(
ctx context.Context, request *admin.ExecutionTerminateRequest) (*admin.ExecutionTerminateResponse, error) {
- assert.True(t, proto.Equal(&identifier, request.Id))
- assert.Equal(t, abortCause, request.Cause)
+ assert.True(t, proto.Equal(&identifier, request.GetId()))
+ assert.Equal(t, abortCause, request.GetCause())
return &admin.ExecutionTerminateResponse{}, nil
})
mockServer := NewMockAdminServer(NewMockAdminServerInput{
diff --git a/flyteadmin/pkg/rpc/adminservice/tests/launch_plan_test.go b/flyteadmin/pkg/rpc/adminservice/tests/launch_plan_test.go
index 00cd10e04f..4fabdbb9c0 100644
--- a/flyteadmin/pkg/rpc/adminservice/tests/launch_plan_test.go
+++ b/flyteadmin/pkg/rpc/adminservice/tests/launch_plan_test.go
@@ -47,7 +47,7 @@ func TestCreateLaunchPlanError(t *testing.T) {
mockLaunchPlanManager.SetCreateCallback(
func(ctx context.Context,
request *admin.LaunchPlanCreateRequest) (*admin.LaunchPlanCreateResponse, error) {
- return nil, errors.GetMissingEntityError(core.ResourceType_LAUNCH_PLAN.String(), request.Id)
+ return nil, errors.GetMissingEntityError(core.ResourceType_LAUNCH_PLAN.String(), request.GetId())
},
)
mockServer := NewMockAdminServer(NewMockAdminServerInput{
diff --git a/flyteadmin/pkg/rpc/adminservice/tests/node_execution_test.go b/flyteadmin/pkg/rpc/adminservice/tests/node_execution_test.go
index 575140fef0..72cdc57ea5 100644
--- a/flyteadmin/pkg/rpc/adminservice/tests/node_execution_test.go
+++ b/flyteadmin/pkg/rpc/adminservice/tests/node_execution_test.go
@@ -32,10 +32,10 @@ func TestCreateNodeEvent(t *testing.T) {
mockNodeExecutionManager.SetCreateNodeEventCallback(
func(ctx context.Context, request *admin.NodeExecutionEventRequest) (
*admin.NodeExecutionEventResponse, error) {
- assert.Equal(t, requestID, request.RequestId)
- assert.NotNil(t, request.Event)
- assert.True(t, proto.Equal(&nodeExecutionID, request.Event.Id))
- assert.Equal(t, phase, request.Event.Phase)
+ assert.Equal(t, requestID, request.GetRequestId())
+ assert.NotNil(t, request.GetEvent())
+ assert.True(t, proto.Equal(&nodeExecutionID, request.GetEvent().GetId()))
+ assert.Equal(t, phase, request.GetEvent().GetPhase())
return &admin.NodeExecutionEventResponse{}, nil
})
mockServer := NewMockAdminServer(NewMockAdminServerInput{
@@ -82,7 +82,7 @@ func TestGetNodeExecution(t *testing.T) {
mockNodeExecutionManager.SetGetNodeExecutionFunc(
func(ctx context.Context,
request *admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) {
- assert.True(t, proto.Equal(&nodeExecutionID, request.Id))
+ assert.True(t, proto.Equal(&nodeExecutionID, request.GetId()))
return response, nil
},
)
@@ -102,7 +102,7 @@ func TestGetNodeExecutionError(t *testing.T) {
mockNodeExecutionManager.SetGetNodeExecutionFunc(
func(ctx context.Context,
request *admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) {
- assert.True(t, proto.Equal(&nodeExecutionID, request.Id))
+ assert.True(t, proto.Equal(&nodeExecutionID, request.GetId()))
return nil, errors.New("expected error")
},
)
@@ -123,9 +123,9 @@ func TestListNodeExecutions(t *testing.T) {
filters := "encoded filters probably"
mockNodeExecutionManager.SetListNodeExecutionsFunc(func(ctx context.Context, request *admin.NodeExecutionListRequest) (
*admin.NodeExecutionList, error) {
- assert.Equal(t, filters, request.Filters)
- assert.Equal(t, uint32(1), request.Limit)
- assert.Equal(t, "20", request.Token)
+ assert.Equal(t, filters, request.GetFilters())
+ assert.Equal(t, uint32(1), request.GetLimit())
+ assert.Equal(t, "20", request.GetToken())
return &admin.NodeExecutionList{
NodeExecutions: []*admin.NodeExecution{
{
@@ -145,7 +145,7 @@ func TestListNodeExecutions(t *testing.T) {
Token: "20",
})
assert.NoError(t, err)
- assert.Len(t, response.NodeExecutions, 1)
+ assert.Len(t, response.GetNodeExecutions(), 1)
}
func TestListNodeExecutionsError(t *testing.T) {
@@ -174,9 +174,9 @@ func TestListNodeExecutionsForTask(t *testing.T) {
mockNodeExecutionManager.SetListNodeExecutionsForTaskFunc(
func(ctx context.Context, request *admin.NodeExecutionForTaskListRequest) (
*admin.NodeExecutionList, error) {
- assert.Equal(t, filters, request.Filters)
- assert.Equal(t, uint32(1), request.Limit)
- assert.Equal(t, "20", request.Token)
+ assert.Equal(t, filters, request.GetFilters())
+ assert.Equal(t, uint32(1), request.GetLimit())
+ assert.Equal(t, "20", request.GetToken())
return &admin.NodeExecutionList{
NodeExecutions: []*admin.NodeExecution{
{
@@ -196,7 +196,7 @@ func TestListNodeExecutionsForTask(t *testing.T) {
Token: "20",
})
assert.NoError(t, err)
- assert.Len(t, response.NodeExecutions, 1)
+ assert.Len(t, response.GetNodeExecutions(), 1)
}
func TestListNodeExecutionsForTaskError(t *testing.T) {
@@ -225,7 +225,7 @@ func TestGetNodeExecutionData(t *testing.T) {
mockNodeExecutionManager.SetGetNodeExecutionDataFunc(
func(ctx context.Context,
request *admin.NodeExecutionGetDataRequest) (*admin.NodeExecutionGetDataResponse, error) {
- assert.True(t, proto.Equal(&nodeExecutionID, request.Id))
+ assert.True(t, proto.Equal(&nodeExecutionID, request.GetId()))
return &admin.NodeExecutionGetDataResponse{
Inputs: &admin.UrlBlob{
Url: "inputs",
@@ -249,9 +249,9 @@ func TestGetNodeExecutionData(t *testing.T) {
assert.True(t, proto.Equal(&admin.UrlBlob{
Url: "inputs",
Bytes: 100,
- }, resp.Inputs))
+ }, resp.GetInputs()))
assert.True(t, proto.Equal(&admin.UrlBlob{
Url: "outputs",
Bytes: 200,
- }, resp.Outputs))
+ }, resp.GetOutputs()))
}
diff --git a/flyteadmin/pkg/rpc/adminservice/tests/task_execution_test.go b/flyteadmin/pkg/rpc/adminservice/tests/task_execution_test.go
index b261401905..637426c455 100644
--- a/flyteadmin/pkg/rpc/adminservice/tests/task_execution_test.go
+++ b/flyteadmin/pkg/rpc/adminservice/tests/task_execution_test.go
@@ -47,11 +47,11 @@ func TestTaskExecution(t *testing.T) {
mockTaskExecutionManager.SetCreateTaskEventCallback(
func(ctx context.Context, request *admin.TaskExecutionEventRequest) (
*admin.TaskExecutionEventResponse, error) {
- assert.Equal(t, requestID, request.RequestId)
- assert.NotNil(t, request.Event)
- assert.True(t, proto.Equal(taskID, request.Event.TaskId))
- assert.Equal(t, phase, request.Event.Phase)
- assert.Equal(t, retryAttempt, request.Event.RetryAttempt)
+ assert.Equal(t, requestID, request.GetRequestId())
+ assert.NotNil(t, request.GetEvent())
+ assert.True(t, proto.Equal(taskID, request.GetEvent().GetTaskId()))
+ assert.Equal(t, phase, request.GetEvent().GetPhase())
+ assert.Equal(t, retryAttempt, request.GetEvent().GetRetryAttempt())
return &admin.TaskExecutionEventResponse{}, nil
})
mockServer := NewMockAdminServer(NewMockAdminServerInput{
@@ -143,9 +143,9 @@ func TestTaskExecution(t *testing.T) {
mockTaskExecutionManager.SetGetTaskExecutionCallback(
func(ctx context.Context, request *admin.TaskExecutionGetRequest) (
*admin.TaskExecution, error) {
- assert.Equal(t, taskID, request.Id.TaskId)
- assert.Equal(t, nodeExecutionID, request.Id.NodeExecutionId)
- assert.Equal(t, retryAttempt, request.Id.RetryAttempt)
+ assert.Equal(t, taskID, request.GetId().GetTaskId())
+ assert.Equal(t, nodeExecutionID, request.GetId().GetNodeExecutionId())
+ assert.Equal(t, retryAttempt, request.GetId().GetRetryAttempt())
return &admin.TaskExecution{}, nil
})
mockServer := NewMockAdminServer(NewMockAdminServerInput{
@@ -232,8 +232,8 @@ func TestTaskExecution(t *testing.T) {
mockTaskExecutionManager.SetListTaskExecutionsCallback(
func(ctx context.Context, request *admin.TaskExecutionListRequest) (
*admin.TaskExecutionList, error) {
- assert.Equal(t, "1", request.Token)
- assert.Equal(t, uint32(99), request.Limit)
+ assert.Equal(t, "1", request.GetToken())
+ assert.Equal(t, uint32(99), request.GetLimit())
assert.True(t, proto.Equal(&core.NodeExecutionIdentifier{
NodeId: "nodey",
ExecutionId: &core.WorkflowExecutionIdentifier{
@@ -241,7 +241,7 @@ func TestTaskExecution(t *testing.T) {
Domain: "domain",
Name: "name",
},
- }, request.NodeExecutionId))
+ }, request.GetNodeExecutionId()))
return &admin.TaskExecutionList{}, nil
})
mockServer := NewMockAdminServer(NewMockAdminServerInput{
@@ -344,9 +344,9 @@ func TestGetTaskExecutionData(t *testing.T) {
assert.True(t, proto.Equal(&admin.UrlBlob{
Url: "inputs",
Bytes: 100,
- }, resp.Inputs))
+ }, resp.GetInputs()))
assert.True(t, proto.Equal(&admin.UrlBlob{
Url: "outputs",
Bytes: 200,
- }, resp.Outputs))
+ }, resp.GetOutputs()))
}
diff --git a/flyteadmin/pkg/rpc/adminservice/tests/task_test.go b/flyteadmin/pkg/rpc/adminservice/tests/task_test.go
index 2e4d5a8287..bd17b1baa6 100644
--- a/flyteadmin/pkg/rpc/adminservice/tests/task_test.go
+++ b/flyteadmin/pkg/rpc/adminservice/tests/task_test.go
@@ -49,7 +49,7 @@ func TestTaskError(t *testing.T) {
mockTaskManager.SetCreateCallback(
func(ctx context.Context,
request *admin.TaskCreateRequest) (*admin.TaskCreateResponse, error) {
- return nil, errors.GetMissingEntityError(core.ResourceType_TASK.String(), request.Id)
+ return nil, errors.GetMissingEntityError(core.ResourceType_TASK.String(), request.GetId())
},
)
mockServer := NewMockAdminServer(NewMockAdminServerInput{
@@ -77,7 +77,7 @@ func TestListUniqueTaskIds(t *testing.T) {
mockTaskManager.SetListUniqueIdsFunc(func(ctx context.Context, request *admin.NamedEntityIdentifierListRequest) (
*admin.NamedEntityIdentifierList, error) {
- assert.Equal(t, "staging", request.Domain)
+ assert.Equal(t, "staging", request.GetDomain())
return nil, nil
})
mockServer := NewMockAdminServer(NewMockAdminServerInput{
diff --git a/flyteadmin/pkg/rpc/adminservice/tests/workflow_test.go b/flyteadmin/pkg/rpc/adminservice/tests/workflow_test.go
index 915c127ac2..5799b32519 100644
--- a/flyteadmin/pkg/rpc/adminservice/tests/workflow_test.go
+++ b/flyteadmin/pkg/rpc/adminservice/tests/workflow_test.go
@@ -49,7 +49,7 @@ func TestCreateWorkflowError(t *testing.T) {
mockWorkflowManager.SetCreateCallback(
func(ctx context.Context,
request *admin.WorkflowCreateRequest) (*admin.WorkflowCreateResponse, error) {
- return nil, errors.GetMissingEntityError(core.ResourceType_WORKFLOW.String(), request.Id)
+ return nil, errors.GetMissingEntityError(core.ResourceType_WORKFLOW.String(), request.GetId())
},
)
mockServer := NewMockAdminServer(NewMockAdminServerInput{
diff --git a/flyteadmin/pkg/rpc/adminservice/util/metrics.go b/flyteadmin/pkg/rpc/adminservice/util/metrics.go
index bcab066a41..1274541a9b 100644
--- a/flyteadmin/pkg/rpc/adminservice/util/metrics.go
+++ b/flyteadmin/pkg/rpc/adminservice/util/metrics.go
@@ -54,7 +54,7 @@ func (m *RequestMetrics) Success() {
func newResponseCodeMetrics(scope promutils.Scope) responseCodeMetrics {
responseCodeCounters := make(map[codes.Code]prometheus.Counter)
for i := 0; i < maxGRPCStatusCode; i++ {
- code := codes.Code(i)
+ code := codes.Code(i) // #nosec G115
responseCodeCounters[code] = scope.MustNewCounter(code.String(),
fmt.Sprintf("count of responses returning: %s", code.String()))
}
diff --git a/flyteadmin/pkg/rpc/adminservice/workflow.go b/flyteadmin/pkg/rpc/adminservice/workflow.go
index ee9a6b4eff..0e0b425f7c 100644
--- a/flyteadmin/pkg/rpc/adminservice/workflow.go
+++ b/flyteadmin/pkg/rpc/adminservice/workflow.go
@@ -27,7 +27,7 @@ func (m *AdminService) CreateWorkflow(
func (m *AdminService) GetWorkflow(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Workflow, error) {
// NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it
// to the request.
- if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED {
+ if request.GetId() != nil && request.GetId().GetResourceType() == core.ResourceType_UNSPECIFIED {
logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request)
request.Id.ResourceType = core.ResourceType_WORKFLOW
}
diff --git a/flyteadmin/pkg/runtime/interfaces/application_configuration.go b/flyteadmin/pkg/runtime/interfaces/application_configuration.go
index 55791a1538..0f729b5cde 100644
--- a/flyteadmin/pkg/runtime/interfaces/application_configuration.go
+++ b/flyteadmin/pkg/runtime/interfaces/application_configuration.go
@@ -212,16 +212,16 @@ func (a *ApplicationConfig) GetAsWorkflowExecutionConfig() *admin.WorkflowExecut
}
// For the others, we only add the field when the field is set in the config.
- if a.GetSecurityContext().RunAs.GetK8SServiceAccount() != "" || a.GetSecurityContext().RunAs.GetIamRole() != "" {
+ if a.GetSecurityContext().GetRunAs().GetK8SServiceAccount() != "" || a.GetSecurityContext().GetRunAs().GetIamRole() != "" {
wec.SecurityContext = a.GetSecurityContext()
}
- if a.GetRawOutputDataConfig().OutputLocationPrefix != "" {
+ if a.GetRawOutputDataConfig().GetOutputLocationPrefix() != "" {
wec.RawOutputDataConfig = a.GetRawOutputDataConfig()
}
- if len(a.GetLabels().Values) > 0 {
+ if len(a.GetLabels().GetValues()) > 0 {
wec.Labels = a.GetLabels()
}
- if len(a.GetAnnotations().Values) > 0 {
+ if len(a.GetAnnotations().GetValues()) > 0 {
wec.Annotations = a.GetAnnotations()
}
diff --git a/flyteadmin/pkg/workflowengine/impl/interface_provider.go b/flyteadmin/pkg/workflowengine/impl/interface_provider.go
index 566613f635..6bae0c9a05 100644
--- a/flyteadmin/pkg/workflowengine/impl/interface_provider.go
+++ b/flyteadmin/pkg/workflowengine/impl/interface_provider.go
@@ -42,8 +42,8 @@ func NewLaunchPlanInterfaceProvider(launchPlan models.LaunchPlan, identifier *co
return &LaunchPlanInterfaceProvider{}, err
}
return &LaunchPlanInterfaceProvider{
- expectedInputs: closure.ExpectedInputs,
- expectedOutputs: closure.ExpectedOutputs,
+ expectedInputs: closure.GetExpectedInputs(),
+ expectedOutputs: closure.GetExpectedOutputs(),
identifier: identifier,
}, nil
}
diff --git a/flyteadmin/pkg/workflowengine/impl/interface_provider_test.go b/flyteadmin/pkg/workflowengine/impl/interface_provider_test.go
index 4d96050f7d..5924dab20c 100644
--- a/flyteadmin/pkg/workflowengine/impl/interface_provider_test.go
+++ b/flyteadmin/pkg/workflowengine/impl/interface_provider_test.go
@@ -64,14 +64,14 @@ func TestGetId(t *testing.T) {
func TestGetExpectedInputs(t *testing.T) {
provider := getProviderForTest(t)
- assert.Contains(t, (*provider.GetExpectedInputs()).Parameters, "foo")
- assert.NotNil(t, (*provider.GetExpectedInputs()).Parameters["foo"].Var.Type.GetSimple())
- assert.EqualValues(t, "STRING", (*provider.GetExpectedInputs()).Parameters["foo"].Var.Type.GetSimple().String())
- assert.NotNil(t, (*provider.GetExpectedInputs()).Parameters["foo"].GetDefault())
+ assert.Contains(t, (*provider.GetExpectedInputs()).GetParameters(), "foo")
+ assert.NotNil(t, (*provider.GetExpectedInputs()).GetParameters()["foo"].GetVar().GetType().GetSimple())
+ assert.EqualValues(t, "STRING", (*provider.GetExpectedInputs()).GetParameters()["foo"].GetVar().GetType().GetSimple().String())
+ assert.NotNil(t, (*provider.GetExpectedInputs()).GetParameters()["foo"].GetDefault())
}
func TestGetExpectedOutputs(t *testing.T) {
provider := getProviderForTest(t)
- assert.EqualValues(t, outputs.Variables["foo"].GetType().GetType(),
- provider.GetExpectedOutputs().Variables["foo"].GetType().GetType())
+ assert.EqualValues(t, outputs.GetVariables()["foo"].GetType().GetType(),
+ provider.GetExpectedOutputs().GetVariables()["foo"].GetType().GetType())
}
diff --git a/flyteadmin/pkg/workflowengine/impl/k8s_executor.go b/flyteadmin/pkg/workflowengine/impl/k8s_executor.go
index d941cc8309..03de2bbba9 100644
--- a/flyteadmin/pkg/workflowengine/impl/k8s_executor.go
+++ b/flyteadmin/pkg/workflowengine/impl/k8s_executor.go
@@ -37,7 +37,7 @@ func (e K8sWorkflowExecutor) Execute(ctx context.Context, data interfaces.Execut
flyteWf, err := e.workflowBuilder.Build(data.WorkflowClosure, data.ExecutionParameters.Inputs, data.ExecutionID, data.Namespace)
if err != nil {
logger.Infof(ctx, "failed to build the workflow [%+v] %v",
- data.WorkflowClosure.Primary.Template.Id, err)
+ data.WorkflowClosure.GetPrimary().GetTemplate().GetId(), err)
return interfaces.ExecutionResponse{}, err
}
err = PrepareFlyteWorkflow(data, flyteWf)
@@ -64,11 +64,11 @@ func (e K8sWorkflowExecutor) Execute(ctx context.Context, data interfaces.Execut
}
executionTargetSpec := executioncluster.ExecutionTargetSpec{
- Project: data.ExecutionID.Project,
- Domain: data.ExecutionID.Domain,
+ Project: data.ExecutionID.GetProject(),
+ Domain: data.ExecutionID.GetDomain(),
Workflow: data.ReferenceWorkflowName,
LaunchPlan: data.ReferenceWorkflowName,
- ExecutionID: data.ExecutionID.Name,
+ ExecutionID: data.ExecutionID.GetName(),
ExecutionClusterLabel: data.ExecutionParameters.ExecutionClusterLabel,
}
targetCluster, err := e.executionCluster.GetTarget(ctx, &executionTargetSpec)
@@ -92,7 +92,7 @@ func (e K8sWorkflowExecutor) Abort(ctx context.Context, data interfaces.AbortDat
TargetID: data.Cluster,
})
if err != nil {
- return errors.NewFlyteAdminErrorf(codes.Internal, err.Error())
+ return errors.NewFlyteAdminErrorf(codes.Internal, err.Error()) //nolint
}
err = target.FlyteClient.FlyteworkflowV1alpha1().FlyteWorkflows(data.Namespace).Delete(ctx, data.ExecutionID.GetName(), v1.DeleteOptions{
PropagationPolicy: &deletePropagationBackground,
diff --git a/flyteadmin/pkg/workflowengine/impl/k8s_executor_test.go b/flyteadmin/pkg/workflowengine/impl/k8s_executor_test.go
index a2ecb51364..5b9db6dfe9 100644
--- a/flyteadmin/pkg/workflowengine/impl/k8s_executor_test.go
+++ b/flyteadmin/pkg/workflowengine/impl/k8s_executor_test.go
@@ -281,7 +281,7 @@ func TestExecute_MiscError(t *testing.T) {
func TestAbort(t *testing.T) {
fakeFlyteWorkflow := FakeFlyteWorkflow{}
fakeFlyteWorkflow.deleteCallback = func(name string, options *v1.DeleteOptions) error {
- assert.Equal(t, execID.Name, name)
+ assert.Equal(t, execID.GetName(), name)
assert.Equal(t, options.PropagationPolicy, &deletePropagationBackground)
return nil
}
@@ -306,7 +306,7 @@ func TestAbort_Notfound(t *testing.T) {
return k8_api_err.NewNotFound(schema.GroupResource{
Group: "foo",
Resource: "bar",
- }, execID.Name)
+ }, execID.GetName())
}
fakeFlyteWF.flyteWorkflowsCallback = func(ns string) v1alpha12.FlyteWorkflowInterface {
assert.Equal(t, namespace, ns)
diff --git a/flyteadmin/pkg/workflowengine/impl/prepare_execution.go b/flyteadmin/pkg/workflowengine/impl/prepare_execution.go
index 169cb15616..70afadbd7b 100644
--- a/flyteadmin/pkg/workflowengine/impl/prepare_execution.go
+++ b/flyteadmin/pkg/workflowengine/impl/prepare_execution.go
@@ -26,20 +26,20 @@ func addMapValues(overrides map[string]string, defaultValues map[string]string)
}
func addPermissions(securityCtx *core.SecurityContext, roleNameKey string, flyteWf *v1alpha1.FlyteWorkflow) {
- if securityCtx == nil || securityCtx.RunAs == nil {
+ if securityCtx == nil || securityCtx.GetRunAs() == nil {
return
}
securityCtxCopy, _ := proto.Clone(securityCtx).(*core.SecurityContext)
flyteWf.SecurityContext = *securityCtxCopy
- if len(securityCtx.RunAs.IamRole) > 0 {
+ if len(securityCtx.GetRunAs().GetIamRole()) > 0 {
if flyteWf.Annotations == nil {
flyteWf.Annotations = map[string]string{}
}
- flyteWf.Annotations[roleNameKey] = securityCtx.RunAs.IamRole
+ flyteWf.Annotations[roleNameKey] = securityCtx.GetRunAs().GetIamRole()
}
- if len(securityCtx.RunAs.K8SServiceAccount) > 0 {
- flyteWf.ServiceAccountName = securityCtx.RunAs.K8SServiceAccount
+ if len(securityCtx.GetRunAs().GetK8SServiceAccount()) > 0 {
+ flyteWf.ServiceAccountName = securityCtx.GetRunAs().GetK8SServiceAccount()
}
}
@@ -53,14 +53,14 @@ func addExecutionOverrides(taskPluginOverrides []*admin.PluginOverride,
},
}
for _, override := range taskPluginOverrides {
- executionConfig.TaskPluginImpls[override.TaskType] = v1alpha1.TaskPluginOverride{
- PluginIDs: override.PluginId,
- MissingPluginBehavior: override.MissingPluginBehavior,
+ executionConfig.TaskPluginImpls[override.GetTaskType()] = v1alpha1.TaskPluginOverride{
+ PluginIDs: override.GetPluginId(),
+ MissingPluginBehavior: override.GetMissingPluginBehavior(),
}
}
if workflowExecutionConfig != nil {
- executionConfig.MaxParallelism = uint32(workflowExecutionConfig.MaxParallelism)
+ executionConfig.MaxParallelism = uint32(workflowExecutionConfig.GetMaxParallelism()) // #nosec G115
if workflowExecutionConfig.GetInterruptible() != nil {
interruptible := workflowExecutionConfig.GetInterruptible().GetValue()
@@ -71,8 +71,8 @@ func addExecutionOverrides(taskPluginOverrides []*admin.PluginOverride,
envs := make(map[string]string)
if workflowExecutionConfig.GetEnvs() != nil {
- for _, v := range workflowExecutionConfig.GetEnvs().Values {
- envs[v.Key] = v.Value
+ for _, v := range workflowExecutionConfig.GetEnvs().GetValues() {
+ envs[v.GetKey()] = v.GetValue()
}
executionConfig.EnvironmentVariables = envs
}
@@ -134,7 +134,7 @@ func PrepareFlyteWorkflow(data interfaces.ExecutionData, flyteWorkflow *v1alpha1
// add permissions from auth and security context. Adding permissions from auth would be removed once all clients
// have migrated over to security context
- addPermissions(data.ExecutionParameters.ExecutionConfig.SecurityContext,
+ addPermissions(data.ExecutionParameters.ExecutionConfig.GetSecurityContext(),
data.ExecutionParameters.RoleNameKey, flyteWorkflow)
labels := addMapValues(data.ExecutionParameters.Labels, flyteWorkflow.Labels)
diff --git a/flyteadmin/scheduler/dbapi/event_scheduler_impl.go b/flyteadmin/scheduler/dbapi/event_scheduler_impl.go
index 972a04480f..bffcace058 100644
--- a/flyteadmin/scheduler/dbapi/event_scheduler_impl.go
+++ b/flyteadmin/scheduler/dbapi/event_scheduler_impl.go
@@ -36,10 +36,10 @@ func (s *eventScheduler) AddSchedule(ctx context.Context, input interfaces.AddSc
var fixedRateUnit admin.FixedRateUnit
switch v := input.ScheduleExpression.GetScheduleExpression().(type) {
case *admin.Schedule_Rate:
- fixedRateValue = v.Rate.Value
- fixedRateUnit = v.Rate.Unit
+ fixedRateValue = v.Rate.GetValue()
+ fixedRateUnit = v.Rate.GetUnit()
case *admin.Schedule_CronSchedule:
- cronString = v.CronSchedule.Schedule
+ cronString = v.CronSchedule.GetSchedule()
default:
return fmt.Errorf("failed adding schedule for unknown schedule expression type %v", v)
}
@@ -48,13 +48,13 @@ func (s *eventScheduler) AddSchedule(ctx context.Context, input interfaces.AddSc
CronExpression: cronString,
FixedRateValue: fixedRateValue,
Unit: fixedRateUnit,
- KickoffTimeInputArg: input.ScheduleExpression.KickoffTimeInputArg,
+ KickoffTimeInputArg: input.ScheduleExpression.GetKickoffTimeInputArg(),
Active: &active,
SchedulableEntityKey: models.SchedulableEntityKey{
- Project: input.Identifier.Project,
- Domain: input.Identifier.Domain,
- Name: input.Identifier.Name,
- Version: input.Identifier.Version,
+ Project: input.Identifier.GetProject(),
+ Domain: input.Identifier.GetDomain(),
+ Name: input.Identifier.GetName(),
+ Version: input.Identifier.GetVersion(),
},
}
err := s.db.SchedulableEntityRepo().Activate(ctx, modelInput)
@@ -69,10 +69,10 @@ func (s *eventScheduler) RemoveSchedule(ctx context.Context, input interfaces.Re
logger.Infof(ctx, "Received call to remove schedule [%+v]. Will deactivate it in the scheduler", input.Identifier)
err := s.db.SchedulableEntityRepo().Deactivate(ctx, models.SchedulableEntityKey{
- Project: input.Identifier.Project,
- Domain: input.Identifier.Domain,
- Name: input.Identifier.Name,
- Version: input.Identifier.Version,
+ Project: input.Identifier.GetProject(),
+ Domain: input.Identifier.GetDomain(),
+ Name: input.Identifier.GetName(),
+ Version: input.Identifier.GetVersion(),
})
if err != nil {
diff --git a/flyteadmin/scheduler/identifier/identifier.go b/flyteadmin/scheduler/identifier/identifier.go
index 5d386e8652..8db71863b7 100644
--- a/flyteadmin/scheduler/identifier/identifier.go
+++ b/flyteadmin/scheduler/identifier/identifier.go
@@ -44,7 +44,7 @@ func GetExecutionIdentifier(ctx context.Context, identifier *core.Identifier, sc
func hashIdentifier(ctx context.Context, identifier *core.Identifier) uint64 {
h := fnv.New64()
_, err := h.Write([]byte(fmt.Sprintf(scheduleNameInputsFormat,
- identifier.Project, identifier.Domain, identifier.Name, identifier.Version)))
+ identifier.GetProject(), identifier.GetDomain(), identifier.GetName(), identifier.GetVersion())))
if err != nil {
// This shouldn't occur.
logger.Errorf(ctx,
@@ -59,7 +59,7 @@ func hashIdentifier(ctx context.Context, identifier *core.Identifier) uint64 {
func hashScheduledTimeStamp(ctx context.Context, identifier *core.Identifier, scheduledTime time.Time) uint64 {
h := fnv.New64()
_, err := h.Write([]byte(fmt.Sprintf(executionIDInputsFormat,
- identifier.Project, identifier.Domain, identifier.Name, identifier.Version, scheduledTime.Unix())))
+ identifier.GetProject(), identifier.GetDomain(), identifier.GetName(), identifier.GetVersion(), scheduledTime.Unix())))
if err != nil {
// This shouldn't occur.
logger.Errorf(ctx,
diff --git a/flytecopilot/.golangci.yml b/flytecopilot/.golangci.yml
index 7f4dbc80e8..71a85ec5c3 100644
--- a/flytecopilot/.golangci.yml
+++ b/flytecopilot/.golangci.yml
@@ -1,35 +1,25 @@
-# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES.
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY:
-#
-# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst
-
run:
skip-dirs:
- pkg/client
-
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- - gas
+ - gosec
- gci
- goconst
- goimports
- - golint
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- staticcheck
- - structcheck
- typecheck
- unconvert
- unparam
- unused
- - varcheck
-
+ - protogetter
linters-settings:
gci:
custom-order: true
@@ -38,3 +28,5 @@ linters-settings:
- default
- prefix(github.com/flyteorg)
skip-generated: true
+ goconst:
+ ignore-tests: true
diff --git a/flytecopilot/cmd/download_test.go b/flytecopilot/cmd/download_test.go
index 16cda7c67d..b96ffd46a6 100644
--- a/flytecopilot/cmd/download_test.go
+++ b/flytecopilot/cmd/download_test.go
@@ -182,8 +182,8 @@ func TestDownloadOptions_Download(t *testing.T) {
errProto := &core.ErrorDocument{}
err = store.ReadProtobuf(ctx, errFile, errProto)
assert.NoError(t, err)
- if assert.NotNil(t, errProto.Error) {
- assert.Equal(t, core.ContainerError_RECOVERABLE, errProto.Error.Kind)
+ if assert.NotNil(t, errProto.GetError()) {
+ assert.Equal(t, core.ContainerError_RECOVERABLE, errProto.GetError().GetKind())
}
})
}
diff --git a/flytecopilot/cmd/sidecar.go b/flytecopilot/cmd/sidecar.go
index 09abdb31e5..179d6362f8 100644
--- a/flytecopilot/cmd/sidecar.go
+++ b/flytecopilot/cmd/sidecar.go
@@ -70,9 +70,9 @@ func (u *UploadOptions) uploader(ctx context.Context) error {
logger.Errorf(ctx, "Bad interface passed, failed to unmarshal err: %s", err)
return errors.Wrap(err, "Bad interface passed, failed to unmarshal, expected core.TypedInterface")
}
- outputInterface := iface.Outputs
+ outputInterface := iface.GetOutputs()
- if iface.Outputs == nil || iface.Outputs.Variables == nil || len(iface.Outputs.Variables) == 0 {
+ if iface.GetOutputs() == nil || iface.GetOutputs().GetVariables() == nil || len(iface.GetOutputs().GetVariables()) == 0 {
logger.Infof(ctx, "Empty output interface received. Assuming void outputs. Sidecar will exit immediately.")
return nil
}
diff --git a/flytecopilot/cmd/sidecar_test.go b/flytecopilot/cmd/sidecar_test.go
index a7cc1c964a..2932e6fa9c 100644
--- a/flytecopilot/cmd/sidecar_test.go
+++ b/flytecopilot/cmd/sidecar_test.go
@@ -87,7 +87,7 @@ func TestUploadOptions_Upload(t *testing.T) {
}
success := path.Join(tmpDir, SuccessFile)
- assert.NoError(t, ioutil.WriteFile(success, []byte("done"), os.ModePerm))
+ assert.NoError(t, os.WriteFile(success, []byte("done"), os.ModePerm)) // #nosec G306
ok, err := containerwatcher.FileExists(success)
assert.NoError(t, err)
assert.True(t, ok, "successfile not created")
diff --git a/flytecopilot/data/download.go b/flytecopilot/data/download.go
index e4efa22222..73d6e3be53 100644
--- a/flytecopilot/data/download.go
+++ b/flytecopilot/data/download.go
@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -52,13 +51,13 @@ func (d Downloader) handleBlob(ctx context.Context, blob *core.Blob, toPath stri
(download each part) (error on write or directory) (close streams safely, track success) (completion or report missing closures)
*/
- blobRef := storage.DataReference(blob.Uri)
+ blobRef := storage.DataReference(blob.GetUri())
scheme, _, _, err := blobRef.Split()
if err != nil {
return nil, errors.Wrapf(err, "Blob uri incorrectly formatted")
}
- if blob.GetMetadata().GetType().Dimensionality == core.BlobType_MULTIPART {
+ if blob.GetMetadata().GetType().GetDimensionality() == core.BlobType_MULTIPART {
// Collect all parts of the multipart blob recursively (List API handles nested directories)
// Set maxItems to 100 as a parameter for the List API, enabling batch retrieval of items until all are downloaded
maxItems := 100
@@ -173,7 +172,7 @@ func (d Downloader) handleBlob(ctx context.Context, blob *core.Blob, toPath stri
}
logger.Infof(ctx, "successfully copied %d remote files from [%s] to local [%s]", downloadSuccess, blobRef, toPath)
return toPath, nil
- } else if blob.GetMetadata().GetType().Dimensionality == core.BlobType_SINGLE {
+ } else if blob.GetMetadata().GetType().GetDimensionality() == core.BlobType_SINGLE {
// reader should be declared here (avoid being shared across all goroutines)
var reader io.ReadCloser
if scheme == "http" || scheme == "https" {
@@ -214,14 +213,14 @@ func (d Downloader) handleBlob(ctx context.Context, blob *core.Blob, toPath stri
}
func (d Downloader) handleSchema(ctx context.Context, schema *core.Schema, toFilePath string) (interface{}, error) {
- return d.handleBlob(ctx, &core.Blob{Uri: schema.Uri, Metadata: &core.BlobMetadata{Type: &core.BlobType{Dimensionality: core.BlobType_MULTIPART}}}, toFilePath)
+ return d.handleBlob(ctx, &core.Blob{Uri: schema.GetUri(), Metadata: &core.BlobMetadata{Type: &core.BlobType{Dimensionality: core.BlobType_MULTIPART}}}, toFilePath)
}
func (d Downloader) handleBinary(_ context.Context, b *core.Binary, toFilePath string, writeToFile bool) (interface{}, error) {
// maybe we should return a map
v := b.GetValue()
if writeToFile {
- return v, ioutil.WriteFile(toFilePath, v, os.ModePerm)
+ return v, os.WriteFile(toFilePath, v, os.ModePerm) // #nosec G306
}
return v, nil
}
@@ -229,9 +228,9 @@ func (d Downloader) handleBinary(_ context.Context, b *core.Binary, toFilePath s
func (d Downloader) handleError(_ context.Context, b *core.Error, toFilePath string, writeToFile bool) (interface{}, error) {
// maybe we should return a map
if writeToFile {
- return b.Message, ioutil.WriteFile(toFilePath, []byte(b.Message), os.ModePerm)
+ return b.GetMessage(), os.WriteFile(toFilePath, []byte(b.GetMessage()), os.ModePerm) // #nosec G306
}
- return b.Message, nil
+ return b.GetMessage(), nil
}
func (d Downloader) handleGeneric(ctx context.Context, b *structpb.Struct, toFilePath string, writeToFile bool) (interface{}, error) {
@@ -259,7 +258,7 @@ func (d Downloader) handlePrimitive(primitive *core.Primitive, toFilePath string
var v interface{}
var err error
- switch primitive.Value.(type) {
+ switch primitive.GetValue().(type) {
case *core.Primitive_StringValue:
v = primitive.GetStringValue()
toByteArray = func() ([]byte, error) {
@@ -307,7 +306,7 @@ func (d Downloader) handlePrimitive(primitive *core.Primitive, toFilePath string
if err != nil {
return nil, err
}
- return v, ioutil.WriteFile(toFilePath, b, os.ModePerm)
+ return v, os.WriteFile(toFilePath, b, os.ModePerm) // #nosec G306
}
return v, nil
}
@@ -321,11 +320,11 @@ func (d Downloader) handleScalar(ctx context.Context, scalar *core.Scalar, toFil
case *core.Scalar_Blob:
b := scalar.GetBlob()
i, err := d.handleBlob(ctx, b, toFilePath)
- return i, &core.Scalar{Value: &core.Scalar_Blob{Blob: &core.Blob{Metadata: b.Metadata, Uri: toFilePath}}}, err
+ return i, &core.Scalar{Value: &core.Scalar_Blob{Blob: &core.Blob{Metadata: b.GetMetadata(), Uri: toFilePath}}}, err
case *core.Scalar_Schema:
b := scalar.GetSchema()
i, err := d.handleSchema(ctx, b, toFilePath)
- return i, &core.Scalar{Value: &core.Scalar_Schema{Schema: &core.Schema{Type: b.Type, Uri: toFilePath}}}, err
+ return i, &core.Scalar{Value: &core.Scalar_Schema{Schema: &core.Schema{Type: b.GetType(), Uri: toFilePath}}}, err
case *core.Scalar_Binary:
b := scalar.GetBinary()
i, err := d.handleBinary(ctx, b, toFilePath, writeToFile)
@@ -340,7 +339,7 @@ func (d Downloader) handleScalar(ctx context.Context, scalar *core.Scalar, toFil
return i, scalar, err
case *core.Scalar_NoneType:
if writeToFile {
- return nil, scalar, ioutil.WriteFile(toFilePath, []byte("null"), os.ModePerm)
+ return nil, scalar, os.WriteFile(toFilePath, []byte("null"), os.ModePerm) // #nosec G306
}
return nil, scalar, nil
default:
@@ -381,12 +380,12 @@ func (d Downloader) handleLiteral(ctx context.Context, lit *core.Literal, filePa
// Collection should be stored as a top level list file and may have accompanying files?
func (d Downloader) handleCollection(ctx context.Context, c *core.LiteralCollection, dir string, writePrimitiveToFile bool) ([]interface{}, *core.LiteralCollection, error) {
- if c == nil || len(c.Literals) == 0 {
+ if c == nil || len(c.GetLiterals()) == 0 {
return []interface{}{}, c, nil
}
var collection []interface{}
litCollection := &core.LiteralCollection{}
- for i, lit := range c.Literals {
+ for i, lit := range c.GetLiterals() {
filePath := path.Join(dir, strconv.Itoa(i))
v, lit, err := d.handleLiteral(ctx, lit, filePath, writePrimitiveToFile)
if err != nil {
@@ -406,11 +405,11 @@ type downloadedResult struct {
func (d Downloader) RecursiveDownload(ctx context.Context, inputs *core.LiteralMap, dir string, writePrimitiveToFile bool) (VarMap, *core.LiteralMap, error) {
childCtx, cancel := context.WithCancel(ctx)
defer cancel()
- if inputs == nil || len(inputs.Literals) == 0 {
+ if inputs == nil || len(inputs.GetLiterals()) == 0 {
return VarMap{}, nil, nil
}
- f := make(FutureMap, len(inputs.Literals))
- for variable, literal := range inputs.Literals {
+ f := make(FutureMap, len(inputs.GetLiterals()))
+ for variable, literal := range inputs.GetLiterals() {
varPath := path.Join(dir, variable)
lit := literal
f[variable] = futures.NewAsyncFuture(childCtx, func(ctx2 context.Context) (interface{}, error) {
@@ -468,7 +467,8 @@ func (d Downloader) DownloadInputs(ctx context.Context, inputRef storage.DataRef
if err != nil {
return err
}
- if err := ioutil.WriteFile(path.Join(outputDir, "inputs.pb"), b, os.ModePerm); err != nil {
+ // #nosec G306
+ if err := os.WriteFile(path.Join(outputDir, "inputs.pb"), b, os.ModePerm); err != nil {
return err
}
@@ -477,14 +477,14 @@ func (d Downloader) DownloadInputs(ctx context.Context, inputRef storage.DataRef
if err != nil {
return errors.Wrapf(err, "failed to marshal out inputs")
}
- return ioutil.WriteFile(path.Join(outputDir, "inputs.json"), m, os.ModePerm)
+ return os.WriteFile(path.Join(outputDir, "inputs.json"), m, os.ModePerm) // #nosec G306
}
if d.format == core.DataLoadingConfig_YAML {
m, err := yaml.Marshal(varMap)
if err != nil {
return errors.Wrapf(err, "failed to marshal out inputs")
}
- return ioutil.WriteFile(path.Join(outputDir, "inputs.yaml"), m, os.ModePerm)
+ return os.WriteFile(path.Join(outputDir, "inputs.yaml"), m, os.ModePerm) // #nosec G306
}
return nil
}
diff --git a/flytecopilot/data/upload.go b/flytecopilot/data/upload.go
index 6cb3831b4c..2103028d28 100644
--- a/flytecopilot/data/upload.go
+++ b/flytecopilot/data/upload.go
@@ -135,8 +135,8 @@ func (u Uploader) RecursiveUpload(ctx context.Context, vars *core.VariableMap, f
return errors.Errorf("User Error: %s", string(b))
}
- varFutures := make(map[string]futures.Future, len(vars.Variables))
- for varName, variable := range vars.Variables {
+ varFutures := make(map[string]futures.Future, len(vars.GetVariables()))
+ for varName, variable := range vars.GetVariables() {
varPath := path.Join(fromPath, varName)
varType := variable.GetType()
switch varType.GetType().(type) {
diff --git a/flytecopilot/data/upload_test.go b/flytecopilot/data/upload_test.go
index a98595a918..0d51d3c8df 100644
--- a/flytecopilot/data/upload_test.go
+++ b/flytecopilot/data/upload_test.go
@@ -3,7 +3,7 @@ package data
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"os"
"path"
"testing"
@@ -21,7 +21,7 @@ func TestUploader_RecursiveUpload(t *testing.T) {
tmpPrefix := "upload_test"
t.Run("upload-blob", func(t *testing.T) {
- tmpDir, err := ioutil.TempDir(tmpFolderLocation, tmpPrefix)
+ tmpDir, err := os.MkdirTemp(tmpFolderLocation, tmpPrefix)
assert.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpDir))
@@ -36,7 +36,7 @@ func TestUploader_RecursiveUpload(t *testing.T) {
}
data := []byte("data")
- assert.NoError(t, ioutil.WriteFile(path.Join(tmpDir, "x"), data, os.ModePerm))
+ assert.NoError(t, os.WriteFile(path.Join(tmpDir, "x"), data, os.ModePerm)) // #nosec G306
fmt.Printf("Written to %s ", path.Join(tmpDir, "x"))
store, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope())
@@ -49,15 +49,15 @@ func TestUploader_RecursiveUpload(t *testing.T) {
outputs := &core.LiteralMap{}
assert.NoError(t, store.ReadProtobuf(context.TODO(), outputRef, outputs))
- assert.Len(t, outputs.Literals, 1)
- assert.NotNil(t, outputs.Literals["x"])
- assert.NotNil(t, outputs.Literals["x"].GetScalar())
- assert.NotNil(t, outputs.Literals["x"].GetScalar().GetBlob())
- ref := storage.DataReference(outputs.Literals["x"].GetScalar().GetBlob().GetUri())
+ assert.Len(t, outputs.GetLiterals(), 1)
+ assert.NotNil(t, outputs.GetLiterals()["x"])
+ assert.NotNil(t, outputs.GetLiterals()["x"].GetScalar())
+ assert.NotNil(t, outputs.GetLiterals()["x"].GetScalar().GetBlob())
+ ref := storage.DataReference(outputs.GetLiterals()["x"].GetScalar().GetBlob().GetUri())
r, err := store.ReadRaw(context.TODO(), ref)
assert.NoError(t, err, "%s does not exist", ref)
defer r.Close()
- b, err := ioutil.ReadAll(r)
+ b, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, string(data), string(b), "content dont match")
})
diff --git a/flytecopilot/data/utils_test.go b/flytecopilot/data/utils_test.go
index 56cc3cc426..517f9d30ef 100644
--- a/flytecopilot/data/utils_test.go
+++ b/flytecopilot/data/utils_test.go
@@ -3,7 +3,6 @@ package data
import (
"bytes"
"context"
- "io/ioutil"
"os"
"path"
"testing"
@@ -19,7 +18,7 @@ func TestIsFileReadable(t *testing.T) {
tmpFolderLocation := ""
tmpPrefix := "util_test"
- tmpDir, err := ioutil.TempDir(tmpFolderLocation, tmpPrefix)
+ tmpDir, err := os.MkdirTemp(tmpFolderLocation, tmpPrefix)
assert.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpDir))
@@ -30,7 +29,7 @@ func TestIsFileReadable(t *testing.T) {
assert.Empty(t, f)
assert.Nil(t, i)
- assert.NoError(t, ioutil.WriteFile(p, []byte("data"), os.ModePerm))
+ assert.NoError(t, os.WriteFile(p, []byte("data"), os.ModePerm)) // #nosec G306
f, i, err = IsFileReadable(p, false)
assert.NoError(t, err)
assert.Equal(t, p, f)
@@ -42,7 +41,7 @@ func TestIsFileReadable(t *testing.T) {
_, _, err = IsFileReadable(noExt, false)
assert.Error(t, err)
- assert.NoError(t, ioutil.WriteFile(p, []byte("data"), os.ModePerm))
+ assert.NoError(t, os.WriteFile(p, []byte("data"), os.ModePerm)) // #nosec G306
_, _, err = IsFileReadable(noExt, false)
assert.Error(t, err)
@@ -57,7 +56,7 @@ func TestUploadFile(t *testing.T) {
tmpFolderLocation := ""
tmpPrefix := "util_test"
- tmpDir, err := ioutil.TempDir(tmpFolderLocation, tmpPrefix)
+ tmpDir, err := os.MkdirTemp(tmpFolderLocation, tmpPrefix)
assert.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpDir))
@@ -66,7 +65,7 @@ func TestUploadFile(t *testing.T) {
exist := path.Join(tmpDir, "exist-file")
data := []byte("data")
l := int64(len(data))
- assert.NoError(t, ioutil.WriteFile(exist, data, os.ModePerm))
+ assert.NoError(t, os.WriteFile(exist, data, os.ModePerm)) // #nosec G306
nonExist := path.Join(tmpDir, "non-exist-file")
store, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope())
diff --git a/flytectl/.golangci.yml b/flytectl/.golangci.yml
index b7e8525336..12de11bf56 100644
--- a/flytectl/.golangci.yml
+++ b/flytectl/.golangci.yml
@@ -1,37 +1,30 @@
-# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES.
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY:
-#
-# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst
-
run:
skip-dirs:
- pkg/client
-
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- - gas
+ - gosec
- gci
- goconst
- goimports
- - golint
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- staticcheck
- - structcheck
- typecheck
- unconvert
- unparam
- unused
- - varcheck
+ - protogetter
linters-settings:
gci:
skip-generated: true
+ goconst:
+ ignore-tests: true
issues:
exclude:
- copylocks
diff --git a/flytectl/cmd/compile/compile.go b/flytectl/cmd/compile/compile.go
index 22c4796e1d..7c5adcf56f 100644
--- a/flytectl/cmd/compile/compile.go
+++ b/flytectl/cmd/compile/compile.go
@@ -61,16 +61,16 @@ func compileFromPackage(packagePath string) error {
case *admin.TaskSpec:
tasks = append(tasks, v)
case *admin.WorkflowSpec:
- workflows[v.Template.Id.Name] = v
+ workflows[v.GetTemplate().GetId().GetName()] = v
case *admin.LaunchPlan:
- plans[v.Id.Name] = v
+ plans[v.GetId().GetName()] = v
}
}
// compile tasks
taskTemplates := []*core.TaskTemplate{}
for _, task := range tasks {
- taskTemplates = append(taskTemplates, task.Template)
+ taskTemplates = append(taskTemplates, task.GetTemplate())
}
fmt.Println("\nCompiling tasks...")
@@ -107,13 +107,13 @@ func handleWorkflow(
compiledLaunchPlanProviders []common.InterfaceProvider,
plans map[string]*admin.LaunchPlan,
workflows map[string]*admin.WorkflowSpec) ([]common.InterfaceProvider, error) {
- reqs, _ := compiler.GetRequirements(workflow.Template, workflow.SubWorkflows)
- wfName := workflow.Template.Id.Name
+ reqs, _ := compiler.GetRequirements(workflow.GetTemplate(), workflow.GetSubWorkflows())
+ wfName := workflow.GetTemplate().GetId().GetName()
// Check if all the subworkflows referenced by launchplan are compiled
for i := range reqs.GetRequiredLaunchPlanIds() {
lpID := reqs.GetRequiredLaunchPlanIds()[i]
- lpWfName := plans[lpID.Name].Spec.WorkflowId.Name
+ lpWfName := plans[lpID.GetName()].GetSpec().GetWorkflowId().GetName()
missingWorkflow := workflows[lpWfName]
if compiledWorkflows[lpWfName] == nil {
// Recursively compile the missing workflow first
@@ -127,8 +127,8 @@ func handleWorkflow(
fmt.Println("\nCompiling workflow:", wfName)
- wf, err := compiler.CompileWorkflow(workflow.Template,
- workflow.SubWorkflows,
+ wf, err := compiler.CompileWorkflow(workflow.GetTemplate(),
+ workflow.GetSubWorkflows(),
compiledTasks,
compiledLaunchPlanProviders)
@@ -140,11 +140,11 @@ func handleWorkflow(
// Update the expected inputs and outputs for the launchplans which reference this workflow
for _, plan := range plans {
- if plan.Spec.WorkflowId.Name == wfName {
- plan.Closure.ExpectedOutputs = wf.Primary.Template.Interface.Outputs
+ if plan.GetSpec().GetWorkflowId().GetName() == wfName {
+ plan.Closure.ExpectedOutputs = wf.GetPrimary().GetTemplate().GetInterface().GetOutputs()
newMap := make(map[string]*core.Parameter)
- for key, value := range wf.Primary.Template.Interface.Inputs.Variables {
+ for key, value := range wf.GetPrimary().GetTemplate().GetInterface().GetInputs().GetVariables() {
newMap[key] = &core.Parameter{
Var: value,
}
diff --git a/flytectl/cmd/config/subcommand/project/project_config.go b/flytectl/cmd/config/subcommand/project/project_config.go
index d76030faa7..fd71628448 100644
--- a/flytectl/cmd/config/subcommand/project/project_config.go
+++ b/flytectl/cmd/config/subcommand/project/project_config.go
@@ -73,18 +73,18 @@ func (c *ConfigProject) GetProjectSpec(cf *config.Config) (*admin.Project, error
}
project := cf.Project
- if len(projectSpec.Id) == 0 && len(project) == 0 {
- err := fmt.Errorf(clierrors.ErrProjectNotPassed)
+ if len(projectSpec.GetId()) == 0 && len(project) == 0 {
+ err := fmt.Errorf(clierrors.ErrProjectNotPassed) //nolint
return nil, err
}
- if len(projectSpec.Id) > 0 && len(project) > 0 {
- err := fmt.Errorf(clierrors.ErrProjectIDBothPassed)
+ if len(projectSpec.GetId()) > 0 && len(project) > 0 {
+ err := fmt.Errorf(clierrors.ErrProjectIDBothPassed) //nolint
return nil, err
}
// Get projectId from file, if not provided, fall back to project
- if len(projectSpec.Id) == 0 {
+ if len(projectSpec.GetId()) == 0 {
projectSpec.Id = project
}
return &projectSpec, nil
@@ -104,7 +104,7 @@ func (c *ConfigProject) MapToAdminState() (admin.Project_ProjectState, error) {
if activate || archive {
if activate == archive {
- return admin.Project_ACTIVE, fmt.Errorf(clierrors.ErrInvalidStateUpdate)
+ return admin.Project_ACTIVE, fmt.Errorf(clierrors.ErrInvalidStateUpdate) //nolint
}
if archive {
return admin.Project_ARCHIVED, nil
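Note on the two recurring changes in this file (and across the PR): generated protobuf getters such as GetId() are nil-safe and return the zero value when the receiver is nil, and the //nolint directives mute the stricter format-string checks that the golangci-lint bump enables for fmt.Errorf calls whose format argument is a package-level message variable. A minimal, self-contained sketch of both ideas — the flyteidl admin import path is assumed from the monorepo layout, and errProjectNotPassed is a stand-in for clierrors.ErrProjectNotPassed:

package main

import (
	"fmt"

	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"
)

var errProjectNotPassed = "project id wasn't passed" // stand-in message variable

func requireProjectID(p *admin.Project) error {
	// GetId() is generated by protoc-gen-go and returns "" even when p is nil,
	// so the call never panics.
	if len(p.GetId()) == 0 {
		// A variable passed as the format string trips the newer printf-style
		// checks; the real code appends //nolint for the same reason.
		return fmt.Errorf(errProjectNotPassed) //nolint
	}
	fmt.Println("project:", p.GetId(), p.GetName())
	return nil
}

func main() {
	fmt.Println(requireProjectID(nil)) // prints the error, no panic
}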
diff --git a/flytectl/cmd/config/subcommand/project/project_config_test.go b/flytectl/cmd/config/subcommand/project/project_config_test.go
index bbaf521375..b111cace09 100644
--- a/flytectl/cmd/config/subcommand/project/project_config_test.go
+++ b/flytectl/cmd/config/subcommand/project/project_config_test.go
@@ -20,7 +20,7 @@ func TestGetProjectSpec(t *testing.T) {
}
response, err := c.GetProjectSpec(cf)
assert.Nil(t, err)
- assert.Equal(t, "flytesnacks1", response.Id)
+ assert.Equal(t, "flytesnacks1", response.GetId())
})
t.Run("Error if project and ID both exist", func(t *testing.T) {
@@ -38,8 +38,8 @@ func TestGetProjectSpec(t *testing.T) {
}
response, err := c.GetProjectSpec(&config.Config{})
assert.Nil(t, err)
- assert.Equal(t, "flytesnacks", response.Name)
- assert.Equal(t, "flytesnacks test", response.Description)
+ assert.Equal(t, "flytesnacks", response.GetName())
+ assert.Equal(t, "flytesnacks test", response.GetDescription())
})
}
diff --git a/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go b/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go
index d8fd83f043..fbf43964bd 100644
--- a/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go
+++ b/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go
@@ -33,7 +33,7 @@ func (g PDWGetterCommandLine) GetDomain() string {
// GetWorkflow returns the first argument from the commandline
func (g PDWGetterCommandLine) GetWorkflow() string {
- if g.Args == nil || len(g.Args) == 0 {
+ if len(g.Args) == 0 {
return ""
}
return g.Args[0]
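Note: the simplification above relies on len being defined as zero for nil slices, so the separate nil check was redundant (this is what the staticcheck/gosimple rule S1009 reports). A tiny sketch:

package main

import "fmt"

// firstArg mirrors GetWorkflow: a nil slice and an empty slice both yield "".
func firstArg(args []string) string {
	if len(args) == 0 { // len(nil) == 0, so no separate nil check is needed
		return ""
	}
	return args[0]
}

func main() {
	var none []string
	fmt.Printf("%q %q\n", firstArg(none), firstArg([]string{"wf"})) // "" "wf"
}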
diff --git a/flytectl/cmd/create/execution.go b/flytectl/cmd/create/execution.go
index 5da311357b..ff8b57fea8 100644
--- a/flytectl/cmd/create/execution.go
+++ b/flytectl/cmd/create/execution.go
@@ -260,7 +260,7 @@ func createExecutionCommand(ctx context.Context, args []string, cmdCtx cmdCore.C
if _err != nil {
return _err
}
- fmt.Printf("execution identifier %v\n", exec.Id)
+ fmt.Printf("execution identifier %v\n", exec.GetId())
}
return nil
}
diff --git a/flytectl/cmd/create/execution_test.go b/flytectl/cmd/create/execution_test.go
index d01b683e02..59fb2b0213 100644
--- a/flytectl/cmd/create/execution_test.go
+++ b/flytectl/cmd/create/execution_test.go
@@ -264,7 +264,7 @@ func (s *createSuite) Test_CreateRelaunchExecution() {
Name: "f652ea3596e7f4d80a0e",
},
}
- executionConfig.Relaunch = relaunchExecResponse.Id.Name
+ executionConfig.Relaunch = relaunchExecResponse.GetId().GetName()
relaunchRequest := &admin.ExecutionRelaunchRequest{
Id: &core.WorkflowExecutionIdentifier{
Name: executionConfig.Relaunch,
diff --git a/flytectl/cmd/create/execution_util.go b/flytectl/cmd/create/execution_util.go
index bcb5c5639f..4b5813ec1e 100644
--- a/flytectl/cmd/create/execution_util.go
+++ b/flytectl/cmd/create/execution_util.go
@@ -51,7 +51,7 @@ func createExecutionRequestForWorkflow(ctx context.Context, workflowName, projec
}
}
- return createExecutionRequest(lp.Id, inputs, envs, securityContext, authRole, targetExecName, executionConfig.TargetExecutionCluster), nil
+ return createExecutionRequest(lp.GetId(), inputs, envs, securityContext, authRole, targetExecName, executionConfig.TargetExecutionCluster), nil
}
func createExecutionRequestForTask(ctx context.Context, taskName string, project string, domain string,
@@ -95,8 +95,8 @@ func createExecutionRequestForTask(ctx context.Context, taskName string, project
ResourceType: core.ResourceType_TASK,
Project: project,
Domain: domain,
- Name: task.Id.Name,
- Version: task.Id.Version,
+ Name: task.GetId().GetName(),
+ Version: task.GetId().GetVersion(),
}
return createExecutionRequest(id, inputs, envs, securityContext, authRole, targetExecName, executionConfig.TargetExecutionCluster), nil
@@ -120,7 +120,7 @@ func relaunchExecution(ctx context.Context, executionName string, project string
if err != nil {
return err
}
- fmt.Printf("execution identifier %v\n", relaunchedExec.Id)
+ fmt.Printf("execution identifier %v\n", relaunchedExec.GetId())
return nil
}
@@ -141,7 +141,7 @@ func recoverExecution(ctx context.Context, executionName string, project string,
if err != nil {
return err
}
- fmt.Printf("execution identifier %v\n", recoveredExec.Id)
+ fmt.Printf("execution identifier %v\n", recoveredExec.GetId())
return nil
}
diff --git a/flytectl/cmd/create/execution_util_test.go b/flytectl/cmd/create/execution_util_test.go
index e27ba4a96b..c77c1c194b 100644
--- a/flytectl/cmd/create/execution_util_test.go
+++ b/flytectl/cmd/create/execution_util_test.go
@@ -134,7 +134,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) {
execCreateRequest, err := createExecutionRequestForWorkflow(s.Ctx, "wfName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfigWithEnvs, "")
assert.Nil(t, err)
assert.NotNil(t, execCreateRequest)
- assert.Equal(t, "cluster", execCreateRequest.Spec.ExecutionClusterLabel.Value)
+ assert.Equal(t, "cluster", execCreateRequest.GetSpec().GetExecutionClusterLabel().GetValue())
})
t.Run("failed literal conversion", func(t *testing.T) {
s := testutils.Setup(t)
diff --git a/flytectl/cmd/create/project.go b/flytectl/cmd/create/project.go
index fa1046a248..777ec7a5db 100644
--- a/flytectl/cmd/create/project.go
+++ b/flytectl/cmd/create/project.go
@@ -49,11 +49,11 @@ func createProjectsCommand(ctx context.Context, args []string, cmdCtx cmdCore.Co
if err != nil {
return err
}
- if projectSpec.Id == "" {
- return fmt.Errorf(clierrors.ErrProjectNotPassed)
+ if projectSpec.GetId() == "" {
+ return fmt.Errorf(clierrors.ErrProjectNotPassed) //nolint
}
- if projectSpec.Name == "" {
- return fmt.Errorf(clierrors.ErrProjectNameNotPassed)
+ if projectSpec.GetName() == "" {
+ return fmt.Errorf(clierrors.ErrProjectNameNotPassed) //nolint
}
if project.DefaultProjectConfig.DryRun {
@@ -61,10 +61,10 @@ func createProjectsCommand(ctx context.Context, args []string, cmdCtx cmdCore.Co
} else {
_, err := cmdCtx.AdminClient().RegisterProject(ctx, &admin.ProjectRegisterRequest{
Project: &admin.Project{
- Id: projectSpec.Id,
- Name: projectSpec.Name,
- Description: projectSpec.Description,
- Labels: projectSpec.Labels,
+ Id: projectSpec.GetId(),
+ Name: projectSpec.GetName(),
+ Description: projectSpec.GetDescription(),
+ Labels: projectSpec.GetLabels(),
},
})
if err != nil {
diff --git a/flytectl/cmd/get/execution.go b/flytectl/cmd/get/execution.go
index 8bfcc36e00..dca51dd9a3 100644
--- a/flytectl/cmd/get/execution.go
+++ b/flytectl/cmd/get/execution.go
@@ -118,7 +118,7 @@ func getCallBack(ctx context.Context, cmdCtx cmdCore.CommandContext) bubbletea.D
if err != nil {
return nil, err
}
- return ExecutionToProtoMessages(executionList.Executions), nil
+ return ExecutionToProtoMessages(executionList.GetExecutions()), nil
}
}
@@ -160,7 +160,7 @@ func getExecutionFunc(ctx context.Context, args []string, cmdCtx cmdCore.Command
if err != nil {
return err
}
- logger.Infof(ctx, "Retrieved %v executions", len(executionList.Executions))
+ logger.Infof(ctx, "Retrieved %v executions", len(executionList.GetExecutions()))
return adminPrinter.Print(config.GetConfig().MustOutputFormat(), executionColumns,
- ExecutionToProtoMessages(executionList.Executions)...)
+ ExecutionToProtoMessages(executionList.GetExecutions())...)
}
diff --git a/flytectl/cmd/get/execution_util.go b/flytectl/cmd/get/execution_util.go
index 45214a7ac8..ea99872f9b 100644
--- a/flytectl/cmd/get/execution_util.go
+++ b/flytectl/cmd/get/execution_util.go
@@ -43,7 +43,7 @@ func WriteExecConfigToFile(executionConfig ExecutionConfig, fileName string) err
func CreateAndWriteExecConfigForTask(task *admin.Task, fileName string) error {
var err error
- executionConfig := ExecutionConfig{Task: task.Id.Name, Version: task.Id.Version}
+ executionConfig := ExecutionConfig{Task: task.GetId().GetName(), Version: task.GetId().GetVersion()}
if executionConfig.Inputs, err = ParamMapForTask(task); err != nil {
return err
}
@@ -52,7 +52,7 @@ func CreateAndWriteExecConfigForTask(task *admin.Task, fileName string) error {
func CreateAndWriteExecConfigForWorkflow(wlp *admin.LaunchPlan, fileName string) error {
var err error
- executionConfig := ExecutionConfig{Workflow: wlp.Id.Name, Version: wlp.Id.Version}
+ executionConfig := ExecutionConfig{Workflow: wlp.GetId().GetName(), Version: wlp.GetId().GetVersion()}
if executionConfig.Inputs, err = ParamMapForWorkflow(wlp); err != nil {
return err
}
@@ -61,31 +61,31 @@ func CreateAndWriteExecConfigForWorkflow(wlp *admin.LaunchPlan, fileName string)
func TaskInputs(task *admin.Task) map[string]*core.Variable {
taskInputs := map[string]*core.Variable{}
- if task == nil || task.Closure == nil {
+ if task == nil || task.GetClosure() == nil {
return taskInputs
}
- if task.Closure.CompiledTask == nil {
+ if task.GetClosure().GetCompiledTask() == nil {
return taskInputs
}
- if task.Closure.CompiledTask.Template == nil {
+ if task.GetClosure().GetCompiledTask().GetTemplate() == nil {
return taskInputs
}
- if task.Closure.CompiledTask.Template.Interface == nil {
+ if task.GetClosure().GetCompiledTask().GetTemplate().GetInterface() == nil {
return taskInputs
}
- if task.Closure.CompiledTask.Template.Interface.Inputs == nil {
+ if task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs() == nil {
return taskInputs
}
- return task.Closure.CompiledTask.Template.Interface.Inputs.Variables
+ return task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs().GetVariables()
}
func ParamMapForTask(task *admin.Task) (map[string]yaml.Node, error) {
taskInputs := TaskInputs(task)
paramMap := make(map[string]yaml.Node, len(taskInputs))
for k, v := range taskInputs {
- varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.Type)
+ varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.GetType())
if err != nil {
- fmt.Println("error creating default value for literal type ", v.Type)
+ fmt.Println("error creating default value for literal type ", v.GetType())
return nil, err
}
var nativeLiteral interface{}
@@ -93,11 +93,11 @@ func ParamMapForTask(task *admin.Task) (map[string]yaml.Node, error) {
return nil, err
}
- if k == v.Description {
+ if k == v.GetDescription() {
// a: # a isn't very helpful
paramMap[k], err = getCommentedYamlNode(nativeLiteral, "")
} else {
- paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.Description)
+ paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.GetDescription())
}
if err != nil {
return nil, err
@@ -108,22 +108,22 @@ func ParamMapForTask(task *admin.Task) (map[string]yaml.Node, error) {
func WorkflowParams(lp *admin.LaunchPlan) map[string]*core.Parameter {
workflowParams := map[string]*core.Parameter{}
- if lp == nil || lp.Spec == nil {
+ if lp == nil || lp.GetSpec() == nil {
return workflowParams
}
- if lp.Spec.DefaultInputs == nil {
+ if lp.GetSpec().GetDefaultInputs() == nil {
return workflowParams
}
- return lp.Spec.DefaultInputs.Parameters
+ return lp.GetSpec().GetDefaultInputs().GetParameters()
}
func ParamMapForWorkflow(lp *admin.LaunchPlan) (map[string]yaml.Node, error) {
workflowParams := WorkflowParams(lp)
paramMap := make(map[string]yaml.Node, len(workflowParams))
for k, v := range workflowParams {
- varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.Var.Type)
+ varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.GetVar().GetType())
if err != nil {
- fmt.Println("error creating default value for literal type ", v.Var.Type)
+ fmt.Println("error creating default value for literal type ", v.GetVar().GetType())
return nil, err
}
var nativeLiteral interface{}
@@ -131,16 +131,16 @@ func ParamMapForWorkflow(lp *admin.LaunchPlan) (map[string]yaml.Node, error) {
return nil, err
}
// Override if there is a default value
- if paramsDefault, ok := v.Behavior.(*core.Parameter_Default); ok {
+ if paramsDefault, ok := v.GetBehavior().(*core.Parameter_Default); ok {
if nativeLiteral, err = coreutils.ExtractFromLiteral(paramsDefault.Default); err != nil {
return nil, err
}
}
- if k == v.Var.Description {
+ if k == v.GetVar().GetDescription() {
// a: # a isn't very helpful
paramMap[k], err = getCommentedYamlNode(nativeLiteral, "")
} else {
- paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.Var.Description)
+ paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.GetVar().GetDescription())
}
if err != nil {
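Note: because every generated getter tolerates a nil receiver, the guard ladder in TaskInputs is equivalent to a single chained call; the PR keeps the explicit checks, which stay closer to the original code. A hedged sketch of the equivalence, with the flyteidl import paths assumed from the monorepo layout:

package main

import (
	"fmt"

	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"
	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"
)

// taskInputs collapses the nil-check ladder: each getter returns nil on a
// nil receiver, so the whole chain is safe and yields nil for missing data.
func taskInputs(task *admin.Task) map[string]*core.Variable {
	if vars := task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs().GetVariables(); vars != nil {
		return vars
	}
	return map[string]*core.Variable{}
}

func main() {
	fmt.Println(len(taskInputs(nil))) // 0, even with a nil task
}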
diff --git a/flytectl/cmd/get/execution_util_test.go b/flytectl/cmd/get/execution_util_test.go
index 2c081950f2..35b7160efb 100644
--- a/flytectl/cmd/get/execution_util_test.go
+++ b/flytectl/cmd/get/execution_util_test.go
@@ -18,7 +18,7 @@ func TestTaskInputs(t *testing.T) {
t.Run("valid inputs", func(t *testing.T) {
task := createTask()
retValue := TaskInputs(task)
- assert.Equal(t, task.Closure.CompiledTask.Template.Interface.Inputs.Variables, retValue)
+ assert.Equal(t, task.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs().GetVariables(), retValue)
})
t.Run("closure compiled task nil", func(t *testing.T) {
task := createTask()
diff --git a/flytectl/cmd/get/launch_plan.go b/flytectl/cmd/get/launch_plan.go
index 79cede32c5..aaf1eacc52 100644
--- a/flytectl/cmd/get/launch_plan.go
+++ b/flytectl/cmd/get/launch_plan.go
@@ -152,12 +152,12 @@ func LaunchplanToTableProtoMessages(l []*admin.LaunchPlan) []proto.Message {
messages := make([]proto.Message, 0, len(l))
for _, m := range l {
m := proto.Clone(m).(*admin.LaunchPlan)
- if m.Closure != nil {
- if m.Closure.ExpectedInputs != nil && m.Closure.ExpectedInputs.Parameters != nil {
- printer.FormatParameterDescriptions(m.Closure.ExpectedInputs.Parameters)
+ if m.GetClosure() != nil {
+ if m.GetClosure().GetExpectedInputs() != nil && m.GetClosure().GetExpectedInputs().GetParameters() != nil {
+ printer.FormatParameterDescriptions(m.GetClosure().GetExpectedInputs().GetParameters())
}
- if m.Closure.ExpectedOutputs != nil && m.Closure.ExpectedOutputs.Variables != nil {
- printer.FormatVariableDescriptions(m.Closure.ExpectedOutputs.Variables)
+ if m.GetClosure().GetExpectedOutputs() != nil && m.GetClosure().GetExpectedOutputs().GetVariables() != nil {
+ printer.FormatVariableDescriptions(m.GetClosure().GetExpectedOutputs().GetVariables())
}
}
messages = append(messages, m)
diff --git a/flytectl/cmd/get/launch_plan_test.go b/flytectl/cmd/get/launch_plan_test.go
index 7b1359b7ec..64e1e99c09 100644
--- a/flytectl/cmd/get/launch_plan_test.go
+++ b/flytectl/cmd/get/launch_plan_test.go
@@ -275,7 +275,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) {
func TestGetLaunchPlanFunc(t *testing.T) {
s := testutils.Setup(t)
getLaunchPlanSetup()
- s.FetcherExt.OnFetchAllVerOfLPMatch(mock.Anything, mock.Anything, "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil)
+ s.FetcherExt.OnFetchAllVerOfLPMatch(mock.Anything, mock.Anything, "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.GetLaunchPlans(), nil)
err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx)
assert.Nil(t, err)
s.FetcherExt.AssertCalled(t, "FetchAllVerOfLP", s.Ctx, "launchplan1", "dummyProject", "dummyDomain", launchplan.DefaultConfig.Filter)
@@ -308,7 +308,7 @@ func TestGetLaunchPlans(t *testing.T) {
t.Run("no workflow filter", func(t *testing.T) {
s := testutils.Setup(t)
getLaunchPlanSetup()
- s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil)
+ s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.GetLaunchPlans(), nil)
argsLp = []string{}
err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx)
assert.Nil(t, err)
@@ -319,7 +319,7 @@ func TestGetLaunchPlans(t *testing.T) {
getLaunchPlanSetup()
s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{
FieldSelector: "workflow.name=workflow2",
- }).Return(launchPlanListResponse.LaunchPlans, nil)
+ }).Return(launchPlanListResponse.GetLaunchPlans(), nil)
argsLp = []string{}
launchplan.DefaultConfig.Workflow = "workflow2"
err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx)
@@ -379,7 +379,7 @@ func TestGetLaunchPlanTableFunc(t *testing.T) {
s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceGetRequest).Return(launchPlanListResponse, nil)
s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan2, nil)
s.MockAdminClient.OnListLaunchPlanIdsMatch(s.Ctx, namedIDRequest).Return(namedIdentifierList, nil)
- s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil)
+ s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.GetLaunchPlans(), nil)
config.GetConfig().Output = printer.OutputFormatTABLE.String()
err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx)
assert.Nil(t, err)
diff --git a/flytectl/cmd/get/node_execution.go b/flytectl/cmd/get/node_execution.go
index 89c902ddbd..2ebe23df1e 100644
--- a/flytectl/cmd/get/node_execution.go
+++ b/flytectl/cmd/get/node_execution.go
@@ -110,7 +110,7 @@ func getExecutionDetails(ctx context.Context, project, domain, execName, nodeNam
}
sort.Slice(nExecDetailsForView[:], func(i, j int) bool {
- return nExecDetailsForView[i].NodeExec.Closure.CreatedAt.AsTime().Before(nExecDetailsForView[j].NodeExec.Closure.CreatedAt.AsTime())
+ return nExecDetailsForView[i].NodeExec.Closure.GetCreatedAt().AsTime().Before(nExecDetailsForView[j].NodeExec.Closure.GetCreatedAt().AsTime())
})
return nExecDetailsForView, nil
@@ -125,49 +125,49 @@ func getNodeExecDetailsInt(ctx context.Context, project, domain, execName, nodeN
}
var nodeExecClosures []*NodeExecutionClosure
- for _, nodeExec := range nExecDetails.NodeExecutions {
+ for _, nodeExec := range nExecDetails.GetNodeExecutions() {
nodeExecClosure := &NodeExecutionClosure{
NodeExec: &NodeExecution{nodeExec},
}
nodeExecClosures = append(nodeExecClosures, nodeExecClosure)
// Check if this is parent node. If yes do recursive call to get child nodes.
- if nodeExec.Metadata != nil && nodeExec.Metadata.IsParentNode {
- nodeExecClosure.ChildNodes, err = getNodeExecDetailsInt(ctx, project, domain, execName, nodeName, nodeExec.Id.NodeId, nodeExecDetailsMap, cmdCtx)
+ if nodeExec.GetMetadata() != nil && nodeExec.GetMetadata().GetIsParentNode() {
+ nodeExecClosure.ChildNodes, err = getNodeExecDetailsInt(ctx, project, domain, execName, nodeName, nodeExec.GetId().GetNodeId(), nodeExecDetailsMap, cmdCtx)
if err != nil {
return nil, err
}
} else {
taskExecList, err := cmdCtx.AdminFetcherExt().FetchTaskExecutionsOnNode(ctx,
- nodeExec.Id.NodeId, execName, project, domain)
+ nodeExec.GetId().GetNodeId(), execName, project, domain)
if err != nil {
return nil, err
}
- for _, taskExec := range taskExecList.TaskExecutions {
+ for _, taskExec := range taskExecList.GetTaskExecutions() {
taskExecClosure := &TaskExecutionClosure{
TaskExecution: &TaskExecution{taskExec},
}
nodeExecClosure.TaskExecutions = append(nodeExecClosure.TaskExecutions, taskExecClosure)
}
// Fetch the node inputs and outputs
- nExecDataResp, err := cmdCtx.AdminFetcherExt().FetchNodeExecutionData(ctx, nodeExec.Id.NodeId, execName, project, domain)
+ nExecDataResp, err := cmdCtx.AdminFetcherExt().FetchNodeExecutionData(ctx, nodeExec.GetId().GetNodeId(), execName, project, domain)
if err != nil {
return nil, err
}
// Extract the inputs from the literal map
- nodeExecClosure.Inputs, err = extractLiteralMap(nExecDataResp.FullInputs)
+ nodeExecClosure.Inputs, err = extractLiteralMap(nExecDataResp.GetFullInputs())
if err != nil {
return nil, err
}
// Extract the outputs from the literal map
- nodeExecClosure.Outputs, err = extractLiteralMap(nExecDataResp.FullOutputs)
+ nodeExecClosure.Outputs, err = extractLiteralMap(nExecDataResp.GetFullOutputs())
if err != nil {
return nil, err
}
}
- nodeExecDetailsMap[nodeExec.Id.NodeId] = nodeExecClosure
+ nodeExecDetailsMap[nodeExec.GetId().GetNodeId()] = nodeExecClosure
// Found the node
- if len(nodeName) > 0 && nodeName == nodeExec.Id.NodeId {
+ if len(nodeName) > 0 && nodeName == nodeExec.GetId().GetNodeId() {
return nodeExecClosures, err
}
}
@@ -183,38 +183,38 @@ func createNodeTaskExecTreeView(rootView gotree.Tree, taskExecClosures []*TaskEx
}
// TODO: Replace this by filter to sort in the admin
sort.Slice(taskExecClosures[:], func(i, j int) bool {
- return taskExecClosures[i].Id.RetryAttempt < taskExecClosures[j].Id.RetryAttempt
+ return taskExecClosures[i].Id.GetRetryAttempt() < taskExecClosures[j].Id.GetRetryAttempt()
})
for _, taskExecClosure := range taskExecClosures {
- attemptView := rootView.Add(taskAttemptPrefix + strconv.Itoa(int(taskExecClosure.Id.RetryAttempt)))
- attemptView.Add(taskExecPrefix + taskExecClosure.Closure.Phase.String() +
- hyphenPrefix + taskExecClosure.Closure.CreatedAt.AsTime().String() +
- hyphenPrefix + taskExecClosure.Closure.UpdatedAt.AsTime().String())
- attemptView.Add(taskTypePrefix + taskExecClosure.Closure.TaskType)
- attemptView.Add(taskReasonPrefix + taskExecClosure.Closure.Reason)
- if taskExecClosure.Closure.Metadata != nil {
+ attemptView := rootView.Add(taskAttemptPrefix + strconv.Itoa(int(taskExecClosure.Id.GetRetryAttempt())))
+ attemptView.Add(taskExecPrefix + taskExecClosure.Closure.GetPhase().String() +
+ hyphenPrefix + taskExecClosure.Closure.GetCreatedAt().AsTime().String() +
+ hyphenPrefix + taskExecClosure.Closure.GetUpdatedAt().AsTime().String())
+ attemptView.Add(taskTypePrefix + taskExecClosure.Closure.GetTaskType())
+ attemptView.Add(taskReasonPrefix + taskExecClosure.Closure.GetReason())
+ if taskExecClosure.Closure.GetMetadata() != nil {
metadata := attemptView.Add(taskMetadataPrefix)
- metadata.Add(taskGeneratedNamePrefix + taskExecClosure.Closure.Metadata.GeneratedName)
- metadata.Add(taskPluginIDPrefix + taskExecClosure.Closure.Metadata.PluginIdentifier)
+ metadata.Add(taskGeneratedNamePrefix + taskExecClosure.Closure.GetMetadata().GetGeneratedName())
+ metadata.Add(taskPluginIDPrefix + taskExecClosure.Closure.GetMetadata().GetPluginIdentifier())
extResourcesView := metadata.Add(taskExtResourcesPrefix)
- for _, extResource := range taskExecClosure.Closure.Metadata.ExternalResources {
- extResourcesView.Add(taskExtResourcePrefix + extResource.ExternalId)
+ for _, extResource := range taskExecClosure.Closure.GetMetadata().GetExternalResources() {
+ extResourcesView.Add(taskExtResourcePrefix + extResource.GetExternalId())
}
resourcePoolInfoView := metadata.Add(taskResourcePrefix)
- for _, rsPool := range taskExecClosure.Closure.Metadata.ResourcePoolInfo {
- resourcePoolInfoView.Add(taskExtResourcePrefix + rsPool.Namespace)
- resourcePoolInfoView.Add(taskExtResourceTokenPrefix + rsPool.AllocationToken)
+ for _, rsPool := range taskExecClosure.Closure.GetMetadata().GetResourcePoolInfo() {
+ resourcePoolInfoView.Add(taskExtResourcePrefix + rsPool.GetNamespace())
+ resourcePoolInfoView.Add(taskExtResourceTokenPrefix + rsPool.GetAllocationToken())
}
}
- sort.Slice(taskExecClosure.Closure.Logs[:], func(i, j int) bool {
- return taskExecClosure.Closure.Logs[i].Name < taskExecClosure.Closure.Logs[j].Name
+ sort.Slice(taskExecClosure.Closure.GetLogs()[:], func(i, j int) bool {
+ return taskExecClosure.Closure.GetLogs()[i].GetName() < taskExecClosure.Closure.GetLogs()[j].GetName()
})
logsView := attemptView.Add(taskLogsPrefix)
- for _, logData := range taskExecClosure.Closure.Logs {
- logsView.Add(taskLogsNamePrefix + logData.Name)
- logsView.Add(taskLogURIPrefix + logData.Uri)
+ for _, logData := range taskExecClosure.Closure.GetLogs() {
+ logsView.Add(taskLogsNamePrefix + logData.GetName())
+ logsView.Add(taskLogURIPrefix + logData.GetUri())
}
}
}
@@ -228,13 +228,13 @@ func createNodeDetailsTreeView(rootView gotree.Tree, nodeExecutionClosures []*No
}
// TODO : Move to sorting using filters.
sort.Slice(nodeExecutionClosures[:], func(i, j int) bool {
- return nodeExecutionClosures[i].NodeExec.Closure.CreatedAt.AsTime().Before(nodeExecutionClosures[j].NodeExec.Closure.CreatedAt.AsTime())
+ return nodeExecutionClosures[i].NodeExec.Closure.GetCreatedAt().AsTime().Before(nodeExecutionClosures[j].NodeExec.Closure.GetCreatedAt().AsTime())
})
for _, nodeExecWrapper := range nodeExecutionClosures {
- nExecView := rootView.Add(nodeExecWrapper.NodeExec.Id.NodeId + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.Phase.String() +
- hyphenPrefix + nodeExecWrapper.NodeExec.Closure.CreatedAt.AsTime().String() +
- hyphenPrefix + nodeExecWrapper.NodeExec.Closure.UpdatedAt.AsTime().String())
+ nExecView := rootView.Add(nodeExecWrapper.NodeExec.Id.GetNodeId() + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.GetPhase().String() +
+ hyphenPrefix + nodeExecWrapper.NodeExec.Closure.GetCreatedAt().AsTime().String() +
+ hyphenPrefix + nodeExecWrapper.NodeExec.Closure.GetUpdatedAt().AsTime().String())
if len(nodeExecWrapper.ChildNodes) > 0 {
createNodeDetailsTreeView(nExecView, nodeExecWrapper.ChildNodes)
}
@@ -254,7 +254,7 @@ func extractLiteralMap(literalMap *core.LiteralMap) (map[string]interface{}, err
if literalMap == nil || literalMap.Literals == nil {
return m, nil
}
- for key, literalVal := range literalMap.Literals {
+ for key, literalVal := range literalMap.GetLiterals() {
extractedLiteralVal, err := coreutils.ExtractFromLiteral(literalVal)
if err != nil {
return nil, err
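Note: the sort callbacks above compare google.protobuf.Timestamp fields by converting them with AsTime() and ordering with time.Time.Before. A small standalone illustration using a toy closure type (not the flyteidl one):

package main

import (
	"fmt"
	"sort"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

type closure struct{ CreatedAt *timestamppb.Timestamp }

func main() {
	items := []closure{
		{CreatedAt: timestamppb.New(time.Unix(200, 0))},
		{CreatedAt: timestamppb.New(time.Unix(100, 0))},
	}
	// Same shape as the flytectl sort: AsTime() converts the proto timestamp
	// to time.Time, and Before orders the slice oldest-first.
	sort.Slice(items, func(i, j int) bool {
		return items[i].CreatedAt.AsTime().Before(items[j].CreatedAt.AsTime())
	})
	for _, it := range items {
		fmt.Println(it.CreatedAt.AsTime().Unix()) // 100, then 200
	}
}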
diff --git a/flytectl/cmd/get/project.go b/flytectl/cmd/get/project.go
index 96b68c56fb..3d1bd87ce2 100644
--- a/flytectl/cmd/get/project.go
+++ b/flytectl/cmd/get/project.go
@@ -84,9 +84,9 @@ func getProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandC
if len(args) == 1 {
id := args[0]
- logger.Debugf(ctx, "Retrieved %v projects", len(projects.Projects))
- for _, v := range projects.Projects {
- if v.Id == id {
+ logger.Debugf(ctx, "Retrieved %v projects", len(projects.GetProjects()))
+ for _, v := range projects.GetProjects() {
+ if v.GetId() == id {
err := adminPrinter.Print(config.GetConfig().MustOutputFormat(), projectColumns, v)
if err != nil {
return err
@@ -97,6 +97,6 @@ func getProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandC
return nil
}
- logger.Debugf(ctx, "Retrieved %v projects", len(projects.Projects))
- return adminPrinter.Print(config.GetConfig().MustOutputFormat(), projectColumns, ProjectToProtoMessages(projects.Projects)...)
+ logger.Debugf(ctx, "Retrieved %v projects", len(projects.GetProjects()))
+ return adminPrinter.Print(config.GetConfig().MustOutputFormat(), projectColumns, ProjectToProtoMessages(projects.GetProjects())...)
}
diff --git a/flytectl/cmd/get/task.go b/flytectl/cmd/get/task.go
index 0b050f5546..fcbb00b684 100644
--- a/flytectl/cmd/get/task.go
+++ b/flytectl/cmd/get/task.go
@@ -125,14 +125,14 @@ func TaskToTableProtoMessages(l []*admin.Task) []proto.Message {
messages := make([]proto.Message, 0, len(l))
for _, m := range l {
m := proto.Clone(m).(*admin.Task)
- if m.Closure != nil && m.Closure.CompiledTask != nil {
- if m.Closure.CompiledTask.Template != nil {
- if m.Closure.CompiledTask.Template.Interface != nil {
- if m.Closure.CompiledTask.Template.Interface.Inputs != nil && m.Closure.CompiledTask.Template.Interface.Inputs.Variables != nil {
- printer.FormatVariableDescriptions(m.Closure.CompiledTask.Template.Interface.Inputs.Variables)
+ if m.GetClosure() != nil && m.GetClosure().GetCompiledTask() != nil {
+ if m.GetClosure().GetCompiledTask().GetTemplate() != nil {
+ if m.GetClosure().GetCompiledTask().GetTemplate().GetInterface() != nil {
+ if m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs() != nil && m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs().GetVariables() != nil {
+ printer.FormatVariableDescriptions(m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetInputs().GetVariables())
}
- if m.Closure.CompiledTask.Template.Interface.Outputs != nil && m.Closure.CompiledTask.Template.Interface.Outputs.Variables != nil {
- printer.FormatVariableDescriptions(m.Closure.CompiledTask.Template.Interface.Outputs.Variables)
+ if m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetOutputs() != nil && m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetOutputs().GetVariables() != nil {
+ printer.FormatVariableDescriptions(m.GetClosure().GetCompiledTask().GetTemplate().GetInterface().GetOutputs().GetVariables())
}
}
}
diff --git a/flytectl/cmd/get/task_test.go b/flytectl/cmd/get/task_test.go
index d0f817fd1e..e5c2dafc94 100644
--- a/flytectl/cmd/get/task_test.go
+++ b/flytectl/cmd/get/task_test.go
@@ -244,7 +244,7 @@ func TestGetTaskFunc(t *testing.T) {
s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil)
s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil)
s.FetcherExt.OnFetchAllVerOfTaskMatch(mock.Anything, mock.Anything, mock.Anything,
- mock.Anything, mock.Anything).Return(taskListResponse.Tasks, nil)
+ mock.Anything, mock.Anything).Return(taskListResponse.GetTasks(), nil)
err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx)
assert.Nil(t, err)
s.FetcherExt.AssertCalled(t, "FetchAllVerOfTask", s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{})
@@ -329,7 +329,7 @@ func TestGetTaskFuncWithTable(t *testing.T) {
taskConfig.DefaultConfig.Filter = filters.Filters{}
s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil)
s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil)
- s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.Tasks, nil)
+ s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.GetTasks(), nil)
config.GetConfig().Output = "table"
err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx)
assert.Nil(t, err)
@@ -455,7 +455,7 @@ func TestGetTasks(t *testing.T) {
taskConfig.DefaultConfig.Filter = filters.Filters{}
s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil)
s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil)
- s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.Tasks, nil)
+ s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.GetTasks(), nil)
err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx)
assert.Nil(t, err)
@@ -471,8 +471,8 @@ func TestGetTasksFilters(t *testing.T) {
}
s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListFilterRequestTask).Return(taskListFilterResponse, nil)
filteredTasks := []*admin.Task{}
- for _, task := range taskListResponse.Tasks {
- if task.Id.Name == "task1" && task.Id.Version == "v1" {
+ for _, task := range taskListResponse.GetTasks() {
+ if task.GetId().GetName() == "task1" && task.GetId().GetVersion() == "v1" {
filteredTasks = append(filteredTasks, task)
}
}
diff --git a/flytectl/cmd/get/workflow.go b/flytectl/cmd/get/workflow.go
index 624e8d2ba8..ce6dc4db8d 100644
--- a/flytectl/cmd/get/workflow.go
+++ b/flytectl/cmd/get/workflow.go
@@ -129,15 +129,15 @@ func WorkflowToTableProtoMessages(l []*admin.Workflow) []proto.Message {
messages := make([]proto.Message, 0, len(l))
for _, m := range l {
m := proto.Clone(m).(*admin.Workflow)
- if m.Closure != nil && m.Closure.CompiledWorkflow != nil {
- if m.Closure.CompiledWorkflow.Primary != nil {
- if m.Closure.CompiledWorkflow.Primary.Template != nil {
- if m.Closure.CompiledWorkflow.Primary.Template.Interface != nil {
- if m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs != nil && m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables != nil {
- printer.FormatVariableDescriptions(m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables)
+ if m.GetClosure() != nil && m.GetClosure().GetCompiledWorkflow() != nil {
+ if m.GetClosure().GetCompiledWorkflow().GetPrimary() != nil {
+ if m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate() != nil {
+ if m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface() != nil {
+ if m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs() != nil && m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs().GetVariables() != nil {
+ printer.FormatVariableDescriptions(m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs().GetVariables())
}
- if m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs != nil && m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs.Variables != nil {
- printer.FormatVariableDescriptions(m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs.Variables)
+ if m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetOutputs() != nil && m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetOutputs().GetVariables() != nil {
+ printer.FormatVariableDescriptions(m.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetOutputs().GetVariables())
}
}
}
diff --git a/flytectl/cmd/register/register_util.go b/flytectl/cmd/register/register_util.go
index b7b419e611..4ef1bab1c1 100644
--- a/flytectl/cmd/register/register_util.go
+++ b/flytectl/cmd/register/register_util.go
@@ -171,10 +171,10 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command
ResourceType: core.ResourceType_LAUNCH_PLAN,
Project: config.GetConfig().Project,
Domain: config.GetConfig().Domain,
- Name: launchPlan.Id.Name,
- Version: launchPlan.Id.Version,
+ Name: launchPlan.GetId().GetName(),
+ Version: launchPlan.GetId().GetVersion(),
},
- Spec: launchPlan.Spec,
+ Spec: launchPlan.GetSpec(),
})
if err != nil {
return err
@@ -185,8 +185,8 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command
Id: &core.Identifier{
Project: config.GetConfig().Project,
Domain: config.GetConfig().Domain,
- Name: launchPlan.Id.Name,
- Version: launchPlan.Id.Version,
+ Name: launchPlan.GetId().GetName(),
+ Version: launchPlan.GetId().GetVersion(),
},
State: admin.LaunchPlanState_ACTIVE,
})
@@ -205,8 +205,8 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command
ResourceType: core.ResourceType_WORKFLOW,
Project: config.GetConfig().Project,
Domain: config.GetConfig().Domain,
- Name: workflowSpec.Template.Id.Name,
- Version: workflowSpec.Template.Id.Version,
+ Name: workflowSpec.GetTemplate().GetId().GetName(),
+ Version: workflowSpec.GetTemplate().GetId().GetVersion(),
},
Spec: workflowSpec,
})
@@ -223,8 +223,8 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command
ResourceType: core.ResourceType_TASK,
Project: config.GetConfig().Project,
Domain: config.GetConfig().Domain,
- Name: taskSpec.Template.Id.Name,
- Version: taskSpec.Template.Id.Version,
+ Name: taskSpec.GetTemplate().GetId().GetName(),
+ Version: taskSpec.GetTemplate().GetId().GetVersion(),
},
Spec: taskSpec,
})
@@ -235,39 +235,39 @@ func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.Command
}
func hydrateNode(node *core.Node, version string, force bool) error {
- targetNode := node.Target
+ targetNode := node.GetTarget()
switch v := targetNode.(type) {
case *core.Node_TaskNode:
taskNodeWrapper := targetNode.(*core.Node_TaskNode)
- taskNodeReference := taskNodeWrapper.TaskNode.Reference.(*core.TaskNode_ReferenceId)
+ taskNodeReference := taskNodeWrapper.TaskNode.GetReference().(*core.TaskNode_ReferenceId)
hydrateIdentifier(taskNodeReference.ReferenceId, version, force)
case *core.Node_WorkflowNode:
workflowNodeWrapper := targetNode.(*core.Node_WorkflowNode)
- switch workflowNodeWrapper.WorkflowNode.Reference.(type) {
+ switch workflowNodeWrapper.WorkflowNode.GetReference().(type) {
case *core.WorkflowNode_SubWorkflowRef:
- subWorkflowNodeReference := workflowNodeWrapper.WorkflowNode.Reference.(*core.WorkflowNode_SubWorkflowRef)
+ subWorkflowNodeReference := workflowNodeWrapper.WorkflowNode.GetReference().(*core.WorkflowNode_SubWorkflowRef)
hydrateIdentifier(subWorkflowNodeReference.SubWorkflowRef, version, force)
case *core.WorkflowNode_LaunchplanRef:
- launchPlanNodeReference := workflowNodeWrapper.WorkflowNode.Reference.(*core.WorkflowNode_LaunchplanRef)
+ launchPlanNodeReference := workflowNodeWrapper.WorkflowNode.GetReference().(*core.WorkflowNode_LaunchplanRef)
hydrateIdentifier(launchPlanNodeReference.LaunchplanRef, version, force)
default:
- return fmt.Errorf("unknown type %T", workflowNodeWrapper.WorkflowNode.Reference)
+ return fmt.Errorf("unknown type %T", workflowNodeWrapper.WorkflowNode.GetReference())
}
case *core.Node_BranchNode:
branchNodeWrapper := targetNode.(*core.Node_BranchNode)
- if err := hydrateNode(branchNodeWrapper.BranchNode.IfElse.Case.ThenNode, version, force); err != nil {
+ if err := hydrateNode(branchNodeWrapper.BranchNode.GetIfElse().GetCase().GetThenNode(), version, force); err != nil {
return fmt.Errorf("failed to hydrateNode")
}
- if len(branchNodeWrapper.BranchNode.IfElse.Other) > 0 {
- for _, ifBlock := range branchNodeWrapper.BranchNode.IfElse.Other {
- if err := hydrateNode(ifBlock.ThenNode, version, force); err != nil {
+ if len(branchNodeWrapper.BranchNode.GetIfElse().GetOther()) > 0 {
+ for _, ifBlock := range branchNodeWrapper.BranchNode.GetIfElse().GetOther() {
+ if err := hydrateNode(ifBlock.GetThenNode(), version, force); err != nil {
return fmt.Errorf("failed to hydrateNode")
}
}
}
- switch branchNodeWrapper.BranchNode.IfElse.Default.(type) {
+ switch branchNodeWrapper.BranchNode.GetIfElse().GetDefault().(type) {
case *core.IfElseBlock_ElseNode:
- elseNodeReference := branchNodeWrapper.BranchNode.IfElse.Default.(*core.IfElseBlock_ElseNode)
+ elseNodeReference := branchNodeWrapper.BranchNode.GetIfElse().GetDefault().(*core.IfElseBlock_ElseNode)
if err := hydrateNode(elseNodeReference.ElseNode, version, force); err != nil {
return fmt.Errorf("failed to hydrateNode")
}
@@ -275,12 +275,12 @@ func hydrateNode(node *core.Node, version string, force bool) error {
case *core.IfElseBlock_Error:
// Do nothing.
default:
- return fmt.Errorf("unknown type %T", branchNodeWrapper.BranchNode.IfElse.Default)
+ return fmt.Errorf("unknown type %T", branchNodeWrapper.BranchNode.GetIfElse().GetDefault())
}
case *core.Node_GateNode:
// Do nothing.
case *core.Node_ArrayNode:
- if err := hydrateNode(v.ArrayNode.Node, version, force); err != nil {
+ if err := hydrateNode(v.ArrayNode.GetNode(), version, force); err != nil {
return fmt.Errorf("failed to hydrateNode")
}
default:
@@ -290,33 +290,33 @@ func hydrateNode(node *core.Node, version string, force bool) error {
}
func hydrateIdentifier(identifier *core.Identifier, version string, force bool) {
- if identifier.Project == "" || identifier.Project == registrationProjectPattern {
+ if identifier.GetProject() == "" || identifier.GetProject() == registrationProjectPattern {
identifier.Project = config.GetConfig().Project
}
- if identifier.Domain == "" || identifier.Domain == registrationDomainPattern {
+ if identifier.GetDomain() == "" || identifier.GetDomain() == registrationDomainPattern {
identifier.Domain = config.GetConfig().Domain
}
- if force || identifier.Version == "" || identifier.Version == registrationVersionPattern {
+ if force || identifier.GetVersion() == "" || identifier.GetVersion() == registrationVersionPattern {
identifier.Version = version
}
}
func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataReference, destinationDir string) error {
- if task.Template.GetContainer() != nil {
- for k := range task.Template.GetContainer().Args {
- if task.Template.GetContainer().Args[k] == registrationRemotePackagePattern {
+ if task.GetTemplate().GetContainer() != nil {
+ for k := range task.GetTemplate().GetContainer().GetArgs() {
+ if task.GetTemplate().GetContainer().GetArgs()[k] == registrationRemotePackagePattern {
task.Template.GetContainer().Args[k] = sourceUploadedLocation.String()
}
- if task.Template.GetContainer().Args[k] == registrationDestDirPattern {
+ if task.GetTemplate().GetContainer().GetArgs()[k] == registrationDestDirPattern {
task.Template.GetContainer().Args[k] = "."
if len(destinationDir) > 0 {
task.Template.GetContainer().Args[k] = destinationDir
}
}
}
- } else if task.Template.GetK8SPod() != nil && task.Template.GetK8SPod().PodSpec != nil {
+ } else if task.GetTemplate().GetK8SPod() != nil && task.GetTemplate().GetK8SPod().GetPodSpec() != nil {
var podSpec = v1.PodSpec{}
- err := utils.UnmarshalStructToObj(task.Template.GetK8SPod().PodSpec, &podSpec)
+ err := utils.UnmarshalStructToObj(task.GetTemplate().GetK8SPod().GetPodSpec(), &podSpec)
if err != nil {
return err
}
@@ -339,9 +339,9 @@ func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataRe
}
task.Template.Target = &core.TaskTemplate_K8SPod{
K8SPod: &core.K8SPod{
- Metadata: task.Template.GetK8SPod().Metadata,
+ Metadata: task.GetTemplate().GetK8SPod().GetMetadata(),
PodSpec: podSpecStruct,
- DataConfig: task.Template.GetK8SPod().DataConfig,
+ DataConfig: task.GetTemplate().GetK8SPod().GetDataConfig(),
},
}
}
@@ -349,15 +349,15 @@ func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataRe
}
func validateLPWithSchedule(lpSpec *admin.LaunchPlanSpec, wf *admin.Workflow) error {
- schedule := lpSpec.EntityMetadata.Schedule
+ schedule := lpSpec.GetEntityMetadata().GetSchedule()
var scheduleRequiredParams []string
- if wf != nil && wf.Closure != nil && wf.Closure.CompiledWorkflow != nil &&
- wf.Closure.CompiledWorkflow.Primary != nil && wf.Closure.CompiledWorkflow.Primary.Template != nil &&
- wf.Closure.CompiledWorkflow.Primary.Template.Interface != nil &&
- wf.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs != nil {
- variables := wf.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables
+ if wf != nil && wf.GetClosure() != nil && wf.GetClosure().GetCompiledWorkflow() != nil &&
+ wf.GetClosure().GetCompiledWorkflow().GetPrimary() != nil && wf.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate() != nil &&
+ wf.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface() != nil &&
+ wf.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs() != nil {
+ variables := wf.GetClosure().GetCompiledWorkflow().GetPrimary().GetTemplate().GetInterface().GetInputs().GetVariables()
for varName := range variables {
- if varName != schedule.KickoffTimeInputArg {
+ if varName != schedule.GetKickoffTimeInputArg() {
scheduleRequiredParams = append(scheduleRequiredParams, varName)
}
}
@@ -366,16 +366,16 @@ func validateLPWithSchedule(lpSpec *admin.LaunchPlanSpec, wf *admin.Workflow) er
// Either the scheduled param should have default or fixed values
var scheduleParamsWithValues []string
// Check for default values
- if lpSpec.DefaultInputs != nil {
- for paramName, paramValue := range lpSpec.DefaultInputs.Parameters {
- if paramName != schedule.KickoffTimeInputArg && paramValue.GetDefault() != nil {
+ if lpSpec.GetDefaultInputs() != nil {
+ for paramName, paramValue := range lpSpec.GetDefaultInputs().GetParameters() {
+ if paramName != schedule.GetKickoffTimeInputArg() && paramValue.GetDefault() != nil {
scheduleParamsWithValues = append(scheduleParamsWithValues, paramName)
}
}
}
// Check for fixed values
- if lpSpec.FixedInputs != nil && lpSpec.FixedInputs.Literals != nil {
- for fixedLiteralName := range lpSpec.FixedInputs.Literals {
+ if lpSpec.GetFixedInputs() != nil && lpSpec.GetFixedInputs().GetLiterals() != nil {
+ for fixedLiteralName := range lpSpec.GetFixedInputs().GetLiterals() {
scheduleParamsWithValues = append(scheduleParamsWithValues, fixedLiteralName)
}
}
@@ -389,14 +389,14 @@ func validateLPWithSchedule(lpSpec *admin.LaunchPlanSpec, wf *admin.Workflow) er
}
func validateLaunchSpec(ctx context.Context, lpSpec *admin.LaunchPlanSpec, cmdCtx cmdCore.CommandContext) error {
- if lpSpec == nil || lpSpec.WorkflowId == nil || lpSpec.EntityMetadata == nil ||
- lpSpec.EntityMetadata.Schedule == nil {
+ if lpSpec == nil || lpSpec.GetWorkflowId() == nil || lpSpec.GetEntityMetadata() == nil ||
+ lpSpec.GetEntityMetadata().GetSchedule() == nil {
return nil
}
// Fetch the workflow spec using the identifier
- workflowID := lpSpec.WorkflowId
- wf, err := cmdCtx.AdminFetcherExt().FetchWorkflowVersion(ctx, workflowID.Name, workflowID.Version,
- workflowID.Project, workflowID.Domain)
+ workflowID := lpSpec.GetWorkflowId()
+ wf, err := cmdCtx.AdminFetcherExt().FetchWorkflowVersion(ctx, workflowID.GetName(), workflowID.GetVersion(),
+ workflowID.GetProject(), workflowID.GetDomain())
if err != nil {
return err
}
@@ -464,7 +464,7 @@ func validateSpec(ctx context.Context, message proto.Message, cmdCtx cmdCore.Com
switch v := message.(type) {
case *admin.LaunchPlan:
launchPlan := v
- if err := validateLaunchSpec(ctx, launchPlan.Spec, cmdCtx); err != nil {
+ if err := validateLaunchSpec(ctx, launchPlan.GetSpec(), cmdCtx); err != nil {
return err
}
}
@@ -475,26 +475,26 @@ func hydrateSpec(message proto.Message, uploadLocation storage.DataReference, co
switch v := message.(type) {
case *admin.LaunchPlan:
launchPlan := message.(*admin.LaunchPlan)
- hydrateIdentifier(launchPlan.Id, config.Version, config.Force)
- hydrateIdentifier(launchPlan.Spec.WorkflowId, config.Version, config.Force)
- if err := hydrateLaunchPlanSpec(config.AssumableIamRole, config.K8sServiceAccount, config.OutputLocationPrefix, launchPlan.Spec); err != nil {
+ hydrateIdentifier(launchPlan.GetId(), config.Version, config.Force)
+ hydrateIdentifier(launchPlan.GetSpec().GetWorkflowId(), config.Version, config.Force)
+ if err := hydrateLaunchPlanSpec(config.AssumableIamRole, config.K8sServiceAccount, config.OutputLocationPrefix, launchPlan.GetSpec()); err != nil {
return err
}
case *admin.WorkflowSpec:
workflowSpec := message.(*admin.WorkflowSpec)
- for _, Noderef := range workflowSpec.Template.Nodes {
+ for _, Noderef := range workflowSpec.GetTemplate().GetNodes() {
if err := hydrateNode(Noderef, config.Version, config.Force); err != nil {
return err
}
}
- if workflowSpec.Template.GetFailureNode() != nil {
- if err := hydrateNode(workflowSpec.Template.GetFailureNode(), config.Version, config.Force); err != nil {
+ if workflowSpec.GetTemplate().GetFailureNode() != nil {
+ if err := hydrateNode(workflowSpec.GetTemplate().GetFailureNode(), config.Version, config.Force); err != nil {
return err
}
}
- hydrateIdentifier(workflowSpec.Template.Id, config.Version, config.Force)
- for _, subWorkflow := range workflowSpec.SubWorkflows {
- for _, Noderef := range subWorkflow.Nodes {
+ hydrateIdentifier(workflowSpec.GetTemplate().GetId(), config.Version, config.Force)
+ for _, subWorkflow := range workflowSpec.GetSubWorkflows() {
+ for _, Noderef := range subWorkflow.GetNodes() {
if err := hydrateNode(Noderef, config.Version, config.Force); err != nil {
return err
}
@@ -504,11 +504,11 @@ func hydrateSpec(message proto.Message, uploadLocation storage.DataReference, co
return err
}
}
- hydrateIdentifier(subWorkflow.Id, config.Version, config.Force)
+ hydrateIdentifier(subWorkflow.GetId(), config.Version, config.Force)
}
case *admin.TaskSpec:
taskSpec := message.(*admin.TaskSpec)
- hydrateIdentifier(taskSpec.Template.Id, config.Version, config.Force)
+ hydrateIdentifier(taskSpec.GetTemplate().GetId(), config.Version, config.Force)
// In case of fast registration, the serialized input proto also has one additional variable to substitute, i.e. the destination bucket for the source code
if err := hydrateTaskSpec(taskSpec, uploadLocation, config.DestinationDirectory); err != nil {
return err
@@ -607,7 +607,7 @@ func readAndCopyArchive(src io.Reader, tempDir string, unarchivedFiles []string)
}
}
} else if header.Typeflag == tar.TypeReg {
- dest, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
+ dest, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) // #nosec G115
if err != nil {
return unarchivedFiles, err
}
@@ -814,8 +814,8 @@ func uploadFastRegisterArtifact(ctx context.Context, project, domain, sourceCode
}
}
- if resp != nil && len(resp.SignedUrl) > 0 {
- return storage.DataReference(resp.NativeUrl), DirectUpload(resp.SignedUrl, h, size, dataRefReaderCloser)
+ if resp != nil && len(resp.GetSignedUrl()) > 0 {
+ return storage.DataReference(resp.GetNativeUrl()), DirectUpload(resp.GetSignedUrl(), h, size, dataRefReaderCloser)
}
dataStore, err := getStorageClient(ctx)
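Note: the `// #nosec G115` annotation above addresses gosec's integer-overflow-conversion rule, which ships with the newer golangci-lint and flags narrowing conversions such as the tar header's int64 mode being turned into an os.FileMode (a uint32). A self-contained sketch of the same conversion:

package main

import (
	"fmt"
	"os"
)

// modeFromTarHeader mirrors the flagged conversion: tar.Header.Mode is int64,
// os.FileMode is uint32 underneath, so gosec G115 warns about possible
// truncation. The value comes from an archive the CLI already trusts, hence
// the suppression comment.
func modeFromTarHeader(mode int64) os.FileMode {
	return os.FileMode(mode) // #nosec G115
}

func main() {
	fmt.Println(modeFromTarHeader(0o644)) // -rw-r--r--
}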
diff --git a/flytectl/cmd/register/register_util_test.go b/flytectl/cmd/register/register_util_test.go
index e068c0f64a..fbe6e8b6f2 100644
--- a/flytectl/cmd/register/register_util_test.go
+++ b/flytectl/cmd/register/register_util_test.go
@@ -359,8 +359,8 @@ func TestHydrateLaunchPlanSpec(t *testing.T) {
lpSpec := &admin.LaunchPlanSpec{}
err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec)
assert.Nil(t, err)
- assert.Equal(t, &admin.AuthRole{AssumableIamRole: "iamRole"}, lpSpec.AuthRole)
- assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole"}}, lpSpec.SecurityContext)
+ assert.Equal(t, &admin.AuthRole{AssumableIamRole: "iamRole"}, lpSpec.GetAuthRole())
+ assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole"}}, lpSpec.GetSecurityContext())
})
t.Run("k8sService account override", func(t *testing.T) {
registerFilesSetup()
@@ -368,8 +368,8 @@ func TestHydrateLaunchPlanSpec(t *testing.T) {
lpSpec := &admin.LaunchPlanSpec{}
err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec)
assert.Nil(t, err)
- assert.Equal(t, &admin.AuthRole{KubernetesServiceAccount: "k8Account"}, lpSpec.AuthRole)
- assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{K8SServiceAccount: "k8Account"}}, lpSpec.SecurityContext)
+ assert.Equal(t, &admin.AuthRole{KubernetesServiceAccount: "k8Account"}, lpSpec.GetAuthRole())
+ assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{K8SServiceAccount: "k8Account"}}, lpSpec.GetSecurityContext())
})
t.Run("Both k8sService and IamRole", func(t *testing.T) {
registerFilesSetup()
@@ -379,8 +379,8 @@ func TestHydrateLaunchPlanSpec(t *testing.T) {
err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec)
assert.Nil(t, err)
assert.Equal(t, &admin.AuthRole{AssumableIamRole: "iamRole",
- KubernetesServiceAccount: "k8Account"}, lpSpec.AuthRole)
- assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole", K8SServiceAccount: "k8Account"}}, lpSpec.SecurityContext)
+ KubernetesServiceAccount: "k8Account"}, lpSpec.GetAuthRole())
+ assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole", K8SServiceAccount: "k8Account"}}, lpSpec.GetSecurityContext())
})
t.Run("Output prefix", func(t *testing.T) {
registerFilesSetup()
@@ -388,7 +388,7 @@ func TestHydrateLaunchPlanSpec(t *testing.T) {
lpSpec := &admin.LaunchPlanSpec{}
err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec)
assert.Nil(t, err)
- assert.Equal(t, &admin.RawOutputDataConfig{OutputLocationPrefix: "prefix"}, lpSpec.RawOutputDataConfig)
+ assert.Equal(t, &admin.RawOutputDataConfig{OutputLocationPrefix: "prefix"}, lpSpec.GetRawOutputDataConfig())
})
}
@@ -648,7 +648,7 @@ func TestHydrateTaskSpec(t *testing.T) {
err = hydrateTaskSpec(task, storage.DataReference("file://somewhere"), "sourcey")
assert.NoError(t, err)
var hydratedPodSpec = v1.PodSpec{}
- err = utils.UnmarshalStructToObj(task.Template.GetK8SPod().PodSpec, &hydratedPodSpec)
+ err = utils.UnmarshalStructToObj(task.GetTemplate().GetK8SPod().GetPodSpec(), &hydratedPodSpec)
assert.NoError(t, err)
assert.Len(t, hydratedPodSpec.Containers[1].Args, 2)
assert.Contains(t, hydratedPodSpec.Containers[1].Args[1], "somewhere")
diff --git a/flytectl/cmd/update/execution.go b/flytectl/cmd/update/execution.go
index 70f34b342f..efe8e64be7 100644
--- a/flytectl/cmd/update/execution.go
+++ b/flytectl/cmd/update/execution.go
@@ -36,13 +36,13 @@ func updateExecutionFunc(ctx context.Context, args []string, cmdCtx cmdCore.Comm
project := config.GetConfig().Project
domain := config.GetConfig().Domain
if len(args) != 1 {
- return fmt.Errorf(clierrors.ErrExecutionNotPassed)
+ return fmt.Errorf(clierrors.ErrExecutionNotPassed) //nolint
}
executionName := args[0]
activate := execution.UConfig.Activate
archive := execution.UConfig.Archive
if activate && archive {
- return fmt.Errorf(clierrors.ErrInvalidStateUpdate)
+ return fmt.Errorf(clierrors.ErrInvalidStateUpdate) //nolint
}
var newState admin.ExecutionState
diff --git a/flytectl/cmd/update/execution_test.go b/flytectl/cmd/update/execution_test.go
index fbcb0b02e9..4fde5683a5 100644
--- a/flytectl/cmd/update/execution_test.go
+++ b/flytectl/cmd/update/execution_test.go
@@ -28,7 +28,7 @@ func TestExecutionCanBeActivated(t *testing.T) {
t, "UpdateExecution", s.Ctx,
mock.MatchedBy(
func(r *admin.ExecutionUpdateRequest) bool {
- return r.State == admin.ExecutionState_EXECUTION_ACTIVE
+ return r.GetState() == admin.ExecutionState_EXECUTION_ACTIVE
}))
})
}
@@ -47,7 +47,7 @@ func TestExecutionCanBeArchived(t *testing.T) {
t, "UpdateExecution", s.Ctx,
mock.MatchedBy(
func(r *admin.ExecutionUpdateRequest) bool {
- return r.State == admin.ExecutionState_EXECUTION_ARCHIVED
+ return r.GetState() == admin.ExecutionState_EXECUTION_ARCHIVED
}))
})
}
@@ -146,7 +146,7 @@ func TestExecutionUpdateFailsWhenExecutionDoesNotExist(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) {
s.FetcherExt.
- OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain).
+ OnFetchExecution(s.Ctx, execution.GetId().GetName(), execution.GetId().GetProject(), execution.GetId().GetDomain()).
Return(nil, ext.NewNotFoundError("execution not found"))
s.MockAdminClient.
OnUpdateExecutionMatch(s.Ctx, mock.Anything).
@@ -165,7 +165,7 @@ func TestExecutionUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) {
s.FetcherExt.
- OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain).
+ OnFetchExecution(s.Ctx, execution.GetId().GetName(), execution.GetId().GetProject(), execution.GetId().GetDomain()).
Return(execution, nil)
s.MockAdminClient.
OnUpdateExecutionMatch(s.Ctx, mock.Anything).
@@ -200,7 +200,7 @@ func testExecutionUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) {
s.FetcherExt.
- OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain).
+ OnFetchExecution(s.Ctx, execution.GetId().GetName(), execution.GetId().GetProject(), execution.GetId().GetDomain()).
Return(execution, nil)
s.MockAdminClient.
OnUpdateExecutionMatch(s.Ctx, mock.Anything).
@@ -230,7 +230,7 @@ func testExecutionUpdateWithMockSetup(
setup(&s, execution.UConfig, target)
}
- args := []string{target.Id.Name}
+ args := []string{target.GetId().GetName()}
err := updateExecutionFunc(s.Ctx, args, s.CmdCtx)
if asserter != nil {
diff --git a/flytectl/cmd/update/launch_plan.go b/flytectl/cmd/update/launch_plan.go
index 466551667e..c847e92e6b 100644
--- a/flytectl/cmd/update/launch_plan.go
+++ b/flytectl/cmd/update/launch_plan.go
@@ -36,12 +36,12 @@ func updateLPFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandCont
project := config.GetConfig().Project
domain := config.GetConfig().Domain
if len(args) != 1 {
- return fmt.Errorf(clierrors.ErrLPNotPassed)
+ return fmt.Errorf(clierrors.ErrLPNotPassed) //nolint
}
name := args[0]
version := launchplan.UConfig.Version
if len(version) == 0 {
- return fmt.Errorf(clierrors.ErrLPVersionNotPassed)
+ return fmt.Errorf(clierrors.ErrLPVersionNotPassed) //nolint
}
activate := launchplan.UConfig.Activate
@@ -55,7 +55,7 @@ func updateLPFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandCont
deactivate = launchplan.UConfig.Deactivate
}
if activate == deactivate && deactivate {
- return fmt.Errorf(clierrors.ErrInvalidBothStateUpdate)
+ return fmt.Errorf(clierrors.ErrInvalidBothStateUpdate) //nolint
}
var newState admin.LaunchPlanState
diff --git a/flytectl/cmd/update/launch_plan_meta.go b/flytectl/cmd/update/launch_plan_meta.go
index 7b1c93fd85..51b6c6769e 100644
--- a/flytectl/cmd/update/launch_plan_meta.go
+++ b/flytectl/cmd/update/launch_plan_meta.go
@@ -37,7 +37,7 @@ func getUpdateLPMetaFunc(namedEntityConfig *NamedEntityConfig) func(ctx context.
project := config.GetConfig().Project
domain := config.GetConfig().Domain
if len(args) != 1 {
- return fmt.Errorf(clierrors.ErrLPNotPassed)
+ return fmt.Errorf(clierrors.ErrLPNotPassed) //nolint
}
name := args[0]
err := namedEntityConfig.UpdateNamedEntity(ctx, name, project, domain, core.ResourceType_LAUNCH_PLAN, cmdCtx)
diff --git a/flytectl/cmd/update/launch_plan_test.go b/flytectl/cmd/update/launch_plan_test.go
index 249a810118..4238a205d5 100644
--- a/flytectl/cmd/update/launch_plan_test.go
+++ b/flytectl/cmd/update/launch_plan_test.go
@@ -28,7 +28,7 @@ func TestLaunchPlanCanBeActivated(t *testing.T) {
t, "UpdateLaunchPlan", s.Ctx,
mock.MatchedBy(
func(r *admin.LaunchPlanUpdateRequest) bool {
- return r.State == admin.LaunchPlanState_ACTIVE
+ return r.GetState() == admin.LaunchPlanState_ACTIVE
}))
})
}
@@ -47,7 +47,7 @@ func TestLaunchPlanCanBeArchived(t *testing.T) {
t, "UpdateLaunchPlan", s.Ctx,
mock.MatchedBy(
func(r *admin.LaunchPlanUpdateRequest) bool {
- return r.State == admin.LaunchPlanState_INACTIVE
+ return r.GetState() == admin.LaunchPlanState_INACTIVE
}))
})
}
@@ -66,7 +66,7 @@ func TestLaunchPlanCanBeDeactivated(t *testing.T) {
t, "UpdateLaunchPlan", s.Ctx,
mock.MatchedBy(
func(r *admin.LaunchPlanUpdateRequest) bool {
- return r.State == admin.LaunchPlanState_INACTIVE
+ return r.GetState() == admin.LaunchPlanState_INACTIVE
}))
})
}
@@ -275,8 +275,8 @@ func testLaunchPlanUpdateWithMockSetup(
setup(&s, launchplan.UConfig, target)
}
- args := []string{target.Id.Name}
- launchplan.UConfig.Version = target.Id.Version
+ args := []string{target.GetId().GetName()}
+ launchplan.UConfig.Version = target.GetId().GetVersion()
err := updateLPFunc(s.Ctx, args, s.CmdCtx)
if asserter != nil {
diff --git a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go
index b7288d6dcc..90d4fca9f7 100644
--- a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go
+++ b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go
@@ -274,10 +274,10 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -296,10 +296,10 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -318,10 +318,10 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -342,10 +342,10 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -363,10 +363,10 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -384,10 +384,10 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -410,10 +410,10 @@ func testWorkflowClusterResourceAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
setup,
@@ -479,10 +479,10 @@ func testProjectClusterResourceAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
setup,
@@ -546,10 +546,10 @@ func testProjectDomainClusterResourceAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_CLUSTER_RESOURCE).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
setup,
diff --git a/flytectl/cmd/update/matchable_execution_cluster_label_test.go b/flytectl/cmd/update/matchable_execution_cluster_label_test.go
index 1006234626..ba14dbe535 100644
--- a/flytectl/cmd/update/matchable_execution_cluster_label_test.go
+++ b/flytectl/cmd/update/matchable_execution_cluster_label_test.go
@@ -274,10 +274,10 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing.
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -296,10 +296,10 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing.
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -318,10 +318,10 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing.
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -342,10 +342,10 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -363,10 +363,10 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -384,10 +384,10 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -410,10 +410,10 @@ func testWorkflowExecutionClusterLabelUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
setup,
@@ -475,10 +475,10 @@ func testProjectExecutionClusterLabelUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
setup,
@@ -538,10 +538,10 @@ func testProjectDomainExecutionClusterLabelUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_CLUSTER_LABEL).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
setup,
diff --git a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go
index e16526faa6..a240dfdd98 100644
--- a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go
+++ b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go
@@ -274,10 +274,10 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_QUEUE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -296,10 +296,10 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_QUEUE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -318,10 +318,10 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_QUEUE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -342,10 +342,10 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_QUEUE).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -363,10 +363,10 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_QUEUE).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -384,10 +384,10 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_QUEUE).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -410,10 +410,10 @@ func testWorkflowExecutionQueueAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_EXECUTION_QUEUE).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
setup,
@@ -479,10 +479,10 @@ func testProjectExecutionQueueAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_EXECUTION_QUEUE).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
setup,
@@ -546,10 +546,10 @@ func testProjectDomainExecutionQueueAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_EXECUTION_QUEUE).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
setup,
diff --git a/flytectl/cmd/update/matchable_plugin_override_test.go b/flytectl/cmd/update/matchable_plugin_override_test.go
index 3b0181392b..649619be03 100644
--- a/flytectl/cmd/update/matchable_plugin_override_test.go
+++ b/flytectl/cmd/update/matchable_plugin_override_test.go
@@ -274,10 +274,10 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -296,10 +296,10 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -318,10 +318,10 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -342,10 +342,10 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -363,10 +363,10 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -384,10 +384,10 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -410,10 +410,10 @@ func testWorkflowPluginOverrideUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
setup,
@@ -485,10 +485,10 @@ func testProjectPluginOverrideUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
setup,
@@ -558,10 +558,10 @@ func testProjectDomainPluginOverrideUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_PLUGIN_OVERRIDE).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
setup,
diff --git a/flytectl/cmd/update/matchable_task_resource_attribute_test.go b/flytectl/cmd/update/matchable_task_resource_attribute_test.go
index 42c2c3ab4f..2fffe2b5ec 100644
--- a/flytectl/cmd/update/matchable_task_resource_attribute_test.go
+++ b/flytectl/cmd/update/matchable_task_resource_attribute_test.go
@@ -274,10 +274,10 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_TASK_RESOURCE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -296,10 +296,10 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_TASK_RESOURCE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -318,10 +318,10 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_TASK_RESOURCE).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -342,10 +342,10 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_TASK_RESOURCE).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -363,10 +363,10 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_TASK_RESOURCE).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -384,10 +384,10 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_TASK_RESOURCE).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -410,10 +410,10 @@ func testWorkflowTaskResourceAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_TASK_RESOURCE).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
setup,
@@ -478,10 +478,10 @@ func testProjectTaskResourceAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_TASK_RESOURCE).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
setup,
@@ -544,10 +544,10 @@ func testProjectDomainTaskResourceAttributeUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_TASK_RESOURCE).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
setup,
diff --git a/flytectl/cmd/update/matchable_workflow_execution_config_test.go b/flytectl/cmd/update/matchable_workflow_execution_config_test.go
index c75b2fd58f..e026a91a9b 100644
--- a/flytectl/cmd/update/matchable_workflow_execution_config_test.go
+++ b/flytectl/cmd/update/matchable_workflow_execution_config_test.go
@@ -274,10 +274,10 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -296,10 +296,10 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -318,10 +318,10 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(nil, ext.NewNotFoundError("attribute"))
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
/* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -342,10 +342,10 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) {
@@ -363,10 +363,10 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) {
@@ -384,10 +384,10 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(fmt.Errorf("network error"))
},
/* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) {
@@ -410,10 +410,10 @@ func testWorkflowExecutionConfigUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) {
s.FetcherExt.
- OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything).
+ OnUpdateWorkflowAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), target.GetWorkflow(), mock.Anything).
Return(nil)
},
setup,
@@ -482,10 +482,10 @@ func testProjectWorkflowExecutionConfigUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) {
s.FetcherExt.
- OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchProjectAttributesMatch(s.Ctx, target.GetProject(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything).
+ OnUpdateProjectAttributesMatch(s.Ctx, target.GetProject(), mock.Anything).
Return(nil)
},
setup,
@@ -552,10 +552,10 @@ func testProjectDomainWorkflowExecutionConfigUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) {
s.FetcherExt.
- OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG).
Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil)
s.UpdaterExt.
- OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything).
+ OnUpdateProjectDomainAttributesMatch(s.Ctx, target.GetProject(), target.GetDomain(), mock.Anything).
Return(nil)
},
setup,
diff --git a/flytectl/cmd/update/named_entity.go b/flytectl/cmd/update/named_entity.go
index 61bbffc705..5e99775e14 100644
--- a/flytectl/cmd/update/named_entity.go
+++ b/flytectl/cmd/update/named_entity.go
@@ -28,7 +28,7 @@ type NamedEntityConfig struct {
func (cfg NamedEntityConfig) UpdateNamedEntity(ctx context.Context, name string, project string, domain string, rsType core.ResourceType, cmdCtx cmdCore.CommandContext) error {
if cfg.Activate && cfg.Archive {
- return fmt.Errorf(clierrors.ErrInvalidStateUpdate)
+ return fmt.Errorf(clierrors.ErrInvalidStateUpdate) //nolint
}
id := &admin.NamedEntityIdentifier{
@@ -45,7 +45,7 @@ func (cfg NamedEntityConfig) UpdateNamedEntity(ctx context.Context, name string,
return fmt.Errorf("update metadata for %s: could not fetch metadata: %w", name, err)
}
- oldMetadata, newMetadata := composeNamedMetadataEdits(cfg, namedEntity.Metadata)
+ oldMetadata, newMetadata := composeNamedMetadataEdits(cfg, namedEntity.GetMetadata())
patch, err := DiffAsYaml(diffPathBefore, diffPathAfter, oldMetadata, newMetadata)
if err != nil {
panic(err)
@@ -86,15 +86,15 @@ func composeNamedMetadataEdits(config NamedEntityConfig, current *admin.NamedEnt
case config.Activate && config.Archive:
panic("cannot both activate and archive")
case config.Activate:
- old.State = current.State
+ old.State = current.GetState()
new.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE
case config.Archive:
- old.State = current.State
+ old.State = current.GetState()
new.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED
}
if config.Description != "" {
- old.Description = current.Description
+ old.Description = current.GetDescription()
new.Description = config.Description
}
diff --git a/flytectl/cmd/update/named_entity_test.go b/flytectl/cmd/update/named_entity_test.go
index 4d4e5b2783..1f8e28a525 100644
--- a/flytectl/cmd/update/named_entity_test.go
+++ b/flytectl/cmd/update/named_entity_test.go
@@ -59,7 +59,7 @@ func testNamedEntityUpdateWithMockSetup(
updateMetadataFactory := getUpdateMetadataFactory(resourceType)
- args := []string{target.Id.Name}
+ args := []string{target.GetId().GetName()}
err := updateMetadataFactory(config)(s.Ctx, args, s.CmdCtx)
if asserter != nil {
diff --git a/flytectl/cmd/update/project.go b/flytectl/cmd/update/project.go
index f6196e35ff..3a779df476 100644
--- a/flytectl/cmd/update/project.go
+++ b/flytectl/cmd/update/project.go
@@ -103,13 +103,13 @@ func updateProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.Comma
return err
}
- if edits.Id == "" {
- return fmt.Errorf(clierrors.ErrProjectNotPassed)
+ if edits.GetId() == "" {
+ return fmt.Errorf(clierrors.ErrProjectNotPassed) //nolint
}
- currentProject, err := cmdCtx.AdminFetcherExt().GetProjectByID(ctx, edits.Id)
+ currentProject, err := cmdCtx.AdminFetcherExt().GetProjectByID(ctx, edits.GetId())
if err != nil {
- return fmt.Errorf("update project %s: could not fetch project: %w", edits.Id, err)
+ return fmt.Errorf("update project %s: could not fetch project: %w", edits.GetId(), err)
}
// We do not compare currentProject against edits directly, because edits does not
@@ -139,10 +139,10 @@ func updateProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.Comma
_, err = cmdCtx.AdminClient().UpdateProject(ctx, edits)
if err != nil {
- return fmt.Errorf(clierrors.ErrFailedProjectUpdate, edits.Id, err)
+ return fmt.Errorf(clierrors.ErrFailedProjectUpdate, edits.GetId(), err)
}
- fmt.Printf("project %s updated\n", edits.Id)
+ fmt.Printf("project %s updated\n", edits.GetId())
return nil
}
@@ -152,14 +152,14 @@ func updateProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.Comma
func copyProjectWithEdits(target *admin.Project, edited *admin.Project, projectConfig *project.ConfigProject) *admin.Project {
copy := *target
- if edited.Name != "" {
- copy.Name = edited.Name
+ if edited.GetName() != "" {
+ copy.Name = edited.GetName()
}
- if edited.Description != "" {
- copy.Description = edited.Description
+ if edited.GetDescription() != "" {
+ copy.Description = edited.GetDescription()
}
if len(edited.GetLabels().GetValues()) != 0 {
- copy.Labels = edited.Labels
+ copy.Labels = edited.GetLabels()
}
// `edited` comes with `admin.Project_ACTIVE` state by default
@@ -182,9 +182,9 @@ func copyProjectWithEdits(target *admin.Project, edited *admin.Project, projectC
// YAML file input, and the flags for `ConfigProject` would also
// be good.
if projectConfig.Archive || projectConfig.Activate {
- copy.State = edited.State
+ copy.State = edited.GetState()
} else {
- edited.State = copy.State
+ edited.State = copy.GetState()
}
	return &copy
}
diff --git a/flytectl/cmd/update/project_test.go b/flytectl/cmd/update/project_test.go
index 0ca41c4309..2451163942 100644
--- a/flytectl/cmd/update/project_test.go
+++ b/flytectl/cmd/update/project_test.go
@@ -27,7 +27,7 @@ func TestProjectCanBeActivated(t *testing.T) {
t, "UpdateProject", s.Ctx,
mock.MatchedBy(
func(r *admin.Project) bool {
- return r.State == admin.Project_ACTIVE
+ return r.GetState() == admin.Project_ACTIVE
}))
})
}
@@ -46,7 +46,7 @@ func TestProjectCanBeArchived(t *testing.T) {
t, "UpdateProject", s.Ctx,
mock.MatchedBy(
func(r *admin.Project) bool {
- return r.State == admin.Project_ARCHIVED
+ return r.GetState() == admin.Project_ARCHIVED
}))
})
}
@@ -145,7 +145,7 @@ func TestProjectUpdateFailsWhenProjectDoesNotExist(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) {
s.FetcherExt.
- OnGetProjectByID(s.Ctx, project.Id).
+ OnGetProjectByID(s.Ctx, project.GetId()).
Return(nil, ext.NewNotFoundError("project not found"))
s.MockAdminClient.
OnUpdateProjectMatch(s.Ctx, mock.Anything).
@@ -164,7 +164,7 @@ func TestProjectUpdateFailsWhenAdminClientFails(t *testing.T) {
t,
/* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) {
s.FetcherExt.
- OnGetProjectByID(s.Ctx, project.Id).
+ OnGetProjectByID(s.Ctx, project.GetId()).
Return(project, nil)
s.MockAdminClient.
OnUpdateProjectMatch(s.Ctx, mock.Anything).
@@ -209,7 +209,7 @@ func TestProjectUpdateDoesNotActivateArchivedProject(t *testing.T) {
t, "UpdateProject", s.Ctx,
mock.MatchedBy(
func(r *admin.Project) bool {
- return r.State == admin.Project_ARCHIVED
+ return r.GetState() == admin.Project_ARCHIVED
}))
})
}
@@ -223,7 +223,7 @@ func testProjectUpdate(
t,
/* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) {
s.FetcherExt.
- OnGetProjectByID(s.Ctx, project.Id).
+ OnGetProjectByID(s.Ctx, project.GetId()).
Return(project, nil)
s.MockAdminClient.
OnUpdateProjectMatch(s.Ctx, mock.Anything).
@@ -249,7 +249,7 @@ func testProjectUpdateWithMockSetup(
}
project.DefaultProjectConfig = &project.ConfigProject{
- ID: target.Id,
+ ID: target.GetId(),
}
config.GetConfig().Project = ""
config.GetConfig().Domain = ""
diff --git a/flytectl/cmd/update/task_meta.go b/flytectl/cmd/update/task_meta.go
index 3783c2dcfc..8e68778c99 100644
--- a/flytectl/cmd/update/task_meta.go
+++ b/flytectl/cmd/update/task_meta.go
@@ -37,7 +37,7 @@ func getUpdateTaskFunc(namedEntityConfig *NamedEntityConfig) func(ctx context.Co
project := config.GetConfig().Project
domain := config.GetConfig().Domain
if len(args) != 1 {
- return fmt.Errorf(clierrors.ErrTaskNotPassed)
+ return fmt.Errorf(clierrors.ErrTaskNotPassed) //nolint
}
name := args[0]
diff --git a/flytectl/cmd/update/workflow_meta.go b/flytectl/cmd/update/workflow_meta.go
index e2a416e0aa..c6604bfb86 100644
--- a/flytectl/cmd/update/workflow_meta.go
+++ b/flytectl/cmd/update/workflow_meta.go
@@ -37,7 +37,7 @@ func getUpdateWorkflowFunc(namedEntityConfig *NamedEntityConfig) func(ctx contex
project := config.GetConfig().Project
domain := config.GetConfig().Domain
if len(args) != 1 {
- return fmt.Errorf(clierrors.ErrWorkflowNotPassed)
+ return fmt.Errorf(clierrors.ErrWorkflowNotPassed) //nolint
}
name := args[0]
err := namedEntityConfig.UpdateNamedEntity(ctx, name, project, domain, core.ResourceType_WORKFLOW, cmdCtx)
diff --git a/flytectl/cmd/version/version.go b/flytectl/cmd/version/version.go
index 88da1330a2..67a28f3531 100644
--- a/flytectl/cmd/version/version.go
+++ b/flytectl/cmd/version/version.go
@@ -103,9 +103,9 @@ func getControlPlaneVersion(ctx context.Context, cmdCtx cmdCore.CommandContext)
}
// Print FlyteAdmin
if err := printVersion(versionOutput{
- Build: v.ControlPlaneVersion.Build,
- BuildTime: v.ControlPlaneVersion.BuildTime,
- Version: v.ControlPlaneVersion.Version,
+ Build: v.GetControlPlaneVersion().GetBuild(),
+ BuildTime: v.GetControlPlaneVersion().GetBuildTime(),
+ Version: v.GetControlPlaneVersion().GetVersion(),
App: controlPlanAppName,
}); err != nil {
return fmt.Errorf("Unable to get the control plane version. Please try again: %v", err)
diff --git a/flytectl/pkg/bubbletea/bubbletea_pagination.go b/flytectl/pkg/bubbletea/bubbletea_pagination.go
index 01a4b7ea98..bc76aaec1c 100644
--- a/flytectl/pkg/bubbletea/bubbletea_pagination.go
+++ b/flytectl/pkg/bubbletea/bubbletea_pagination.go
@@ -147,7 +147,7 @@ func Paginator(_listHeader []printer.Column, _callback DataCallback, _filter fil
listHeader = _listHeader
callback = _callback
filter = _filter
- filter.Page = int32(_max(int(filter.Page), 1))
+ filter.Page = max(filter.Page, 1)
firstBatchIndex = (int(filter.Page) - 1) / pagePerBatch
lastBatchIndex = firstBatchIndex
diff --git a/flytectl/pkg/bubbletea/bubbletea_pagination_util.go b/flytectl/pkg/bubbletea/bubbletea_pagination_util.go
index dc6ddd735b..3d2a02dd09 100644
--- a/flytectl/pkg/bubbletea/bubbletea_pagination_util.go
+++ b/flytectl/pkg/bubbletea/bubbletea_pagination_util.go
@@ -65,23 +65,9 @@ func (p printTableProto) MarshalJSON() ([]byte, error) {
return buf.Bytes(), nil
}
-func _max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func _min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
func getSliceBounds(m *pageModel) (start int, end int) {
start = (m.paginator.Page - firstBatchIndex*pagePerBatch) * msgPerPage
- end = _min(start+msgPerPage, len(*m.items))
+ end = min(start+msgPerPage, len(*m.items))
return start, end
}
@@ -117,7 +103,7 @@ func getMessageList(batchIndex int) ([]proto.Message, error) {
msg, err := callback(filters.Filters{
Limit: msgPerBatch,
- Page: int32(batchIndex + 1),
+ Page: int32(batchIndex + 1), // #nosec G115
SortBy: filter.SortBy,
Asc: filter.Asc,
})
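The `_min`/`_max` helpers removed above are redundant since Go 1.21, which added built-in generic `min` and `max`; they work on any ordered type, so the `int32` page value no longer needs the `int()`/`int32()` round trip seen in the old `Paginator` code. A minimal sketch:

```go
package main

import "fmt"

func main() {
	var page int32 = 0
	page = max(page, 1) // built-in since Go 1.21; works on int32 directly

	start, msgPerPage, total := 20, 10, 23
	end := min(start+msgPerPage, total) // clamp the slice upper bound
	fmt.Println(page, start, end)       // 1 20 23
}
```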
diff --git a/flytectl/pkg/ext/launch_plan_fetcher.go b/flytectl/pkg/ext/launch_plan_fetcher.go
index 5a8befc093..8f047f681c 100644
--- a/flytectl/pkg/ext/launch_plan_fetcher.go
+++ b/flytectl/pkg/ext/launch_plan_fetcher.go
@@ -19,10 +19,10 @@ func (a *AdminFetcherExtClient) FetchAllVerOfLP(ctx context.Context, lpName, pro
if err != nil {
return nil, err
}
- if len(tList.LaunchPlans) == 0 {
+ if len(tList.GetLaunchPlans()) == 0 {
return nil, fmt.Errorf("no launchplans retrieved for %v", lpName)
}
- return tList.LaunchPlans, nil
+ return tList.GetLaunchPlans(), nil
}
// FetchLPLatestVersion fetches latest version for give launch plan name
diff --git a/flytectl/pkg/ext/project_fetcher.go b/flytectl/pkg/ext/project_fetcher.go
index a1e83fdf70..f6495b8ff2 100644
--- a/flytectl/pkg/ext/project_fetcher.go
+++ b/flytectl/pkg/ext/project_fetcher.go
@@ -33,13 +33,13 @@ func (a *AdminFetcherExtClient) GetProjectByID(ctx context.Context, projectID st
return nil, err
}
- if len(response.Projects) == 0 {
+ if len(response.GetProjects()) == 0 {
return nil, NewNotFoundError("project %s", projectID)
}
- if len(response.Projects) > 1 {
- panic(fmt.Sprintf("unexpected number of projects in ListProjects response: %d - 0 or 1 expected", len(response.Projects)))
+ if len(response.GetProjects()) > 1 {
+ panic(fmt.Sprintf("unexpected number of projects in ListProjects response: %d - 0 or 1 expected", len(response.GetProjects())))
}
- return response.Projects[0], nil
+ return response.GetProjects()[0], nil
}
diff --git a/flytectl/pkg/ext/task_fetcher.go b/flytectl/pkg/ext/task_fetcher.go
index 53c0acccb0..d602ef59b3 100644
--- a/flytectl/pkg/ext/task_fetcher.go
+++ b/flytectl/pkg/ext/task_fetcher.go
@@ -18,10 +18,10 @@ func (a *AdminFetcherExtClient) FetchAllVerOfTask(ctx context.Context, name, pro
if err != nil {
return nil, err
}
- if len(tList.Tasks) == 0 {
+ if len(tList.GetTasks()) == 0 {
return nil, fmt.Errorf("no tasks retrieved for %v", name)
}
- return tList.Tasks, nil
+ return tList.GetTasks(), nil
}
func (a *AdminFetcherExtClient) FetchTaskLatestVersion(ctx context.Context, name, project, domain string, filter filters.Filters) (*admin.Task, error) {
diff --git a/flytectl/pkg/ext/workflow_fetcher.go b/flytectl/pkg/ext/workflow_fetcher.go
index 69032bb998..0aacdd756a 100644
--- a/flytectl/pkg/ext/workflow_fetcher.go
+++ b/flytectl/pkg/ext/workflow_fetcher.go
@@ -19,10 +19,10 @@ func (a *AdminFetcherExtClient) FetchAllVerOfWorkflow(ctx context.Context, workf
if err != nil {
return nil, err
}
- if len(wList.Workflows) == 0 {
+ if len(wList.GetWorkflows()) == 0 {
return nil, fmt.Errorf("no workflow retrieved for %v", workflowName)
}
- return wList.Workflows, nil
+ return wList.GetWorkflows(), nil
}
// FetchAllWorkflows fetches all workflows in project domain
@@ -35,10 +35,10 @@ func (a *AdminFetcherExtClient) FetchAllWorkflows(ctx context.Context, project,
if err != nil {
return nil, err
}
- if len(wList.Entities) == 0 {
+ if len(wList.GetEntities()) == 0 {
return nil, fmt.Errorf("no workflow retrieved for %v project %v domain", project, domain)
}
- return wList.Entities, nil
+ return wList.GetEntities(), nil
}
// FetchWorkflowLatestVersion fetches latest version for given workflow name
@@ -53,7 +53,7 @@ func (a *AdminFetcherExtClient) FetchWorkflowLatestVersion(ctx context.Context,
if err != nil {
return nil, err
}
- return a.FetchWorkflowVersion(ctx, name, wVersions[0].Id.Version, project, domain)
+ return a.FetchWorkflowVersion(ctx, name, wVersions[0].GetId().GetVersion(), project, domain)
}
// FetchWorkflowVersion fetches particular version of workflow
diff --git a/flytectl/pkg/filters/util.go b/flytectl/pkg/filters/util.go
index a19481e32d..aed4d25f16 100644
--- a/flytectl/pkg/filters/util.go
+++ b/flytectl/pkg/filters/util.go
@@ -13,7 +13,7 @@ func BuildResourceListRequestWithName(c Filters, project, domain, name string) (
return nil, err
}
request := &admin.ResourceListRequest{
- Limit: uint32(c.Limit),
+ Limit: uint32(c.Limit), // #nosec G115
Token: getToken(c),
Filters: fieldSelector,
Id: &admin.NamedEntityIdentifier{
@@ -36,7 +36,7 @@ func BuildNamedEntityListRequest(c Filters, project, domain string, resourceType
return nil, err
}
request := &admin.NamedEntityListRequest{
- Limit: uint32(c.Limit),
+ Limit: uint32(c.Limit), // #nosec G115
Token: getToken(c),
Filters: fieldSelector,
Project: project,
@@ -55,7 +55,7 @@ func BuildProjectListRequest(c Filters) (*admin.ProjectListRequest, error) {
return nil, err
}
request := &admin.ProjectListRequest{
- Limit: uint32(c.Limit),
+ Limit: uint32(c.Limit), // #nosec G115
Token: getToken(c),
Filters: fieldSelector,
SortBy: buildSortingRequest(c),
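The `// #nosec G115` annotations added here acknowledge gosec's newer integer-overflow-on-conversion rule (G115), which flags casts such as `int` -> `uint32` that could silently wrap; the limits involved are already bounded, so the annotation is the lightest fix. Where the bound is less obvious, an explicit clamp keeps the conversion warning-free without suppression. A sketch with an assumed int64 input value:

```go
package main

import (
	"fmt"
	"math"
)

// safeUint32 converts an int64 to uint32, clamping instead of silently
// wrapping; an explicit bound like this is the alternative to "// #nosec G115".
func safeUint32(n int64) uint32 {
	if n < 0 {
		return 0
	}
	if n > math.MaxUint32 {
		return math.MaxUint32
	}
	return uint32(n)
}

func main() {
	fmt.Println(safeUint32(100), safeUint32(-5)) // 100 0
}
```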
diff --git a/flytectl/pkg/k8s/k8s_test.go b/flytectl/pkg/k8s/k8s_test.go
index 84dc16923c..41ea4eaafa 100644
--- a/flytectl/pkg/k8s/k8s_test.go
+++ b/flytectl/pkg/k8s/k8s_test.go
@@ -45,7 +45,8 @@ users:
t.Error(err)
}
defer os.Remove(tmpfile.Name())
- if err := ioutil.WriteFile(tmpfile.Name(), []byte(content), os.ModePerm); err != nil {
+ // #nosec G306
+ if err := os.WriteFile(tmpfile.Name(), []byte(content), os.ModePerm); err != nil {
t.Error(err)
}
t.Run("Create client from config", func(t *testing.T) {
diff --git a/flytectl/pkg/printer/printer.go b/flytectl/pkg/printer/printer.go
index df7effd8a1..bc67d75e9d 100644
--- a/flytectl/pkg/printer/printer.go
+++ b/flytectl/pkg/printer/printer.go
@@ -199,8 +199,8 @@ func FormatVariableDescriptions(variableMap map[string]*core.Variable) {
for _, k := range keys {
v := variableMap[k]
// a: a isn't very helpful
- if k != v.Description {
- descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.Description)))
+ if k != v.GetDescription() {
+ descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.GetDescription())))
} else {
descriptions = append(descriptions, getTruncatedLine(k))
}
@@ -220,12 +220,12 @@ func FormatParameterDescriptions(parameterMap map[string]*core.Parameter) {
var descriptions []string
for _, k := range keys {
v := parameterMap[k]
- if v.Var == nil {
+ if v.GetVar() == nil {
continue
}
// a: a isn't very helpful
- if k != v.Var.Description {
- descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.Var.Description)))
+ if k != v.GetVar().GetDescription() {
+ descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.GetVar().GetDescription())))
} else {
descriptions = append(descriptions, getTruncatedLine(k))
}
@@ -272,7 +272,7 @@ func (p Printer) Print(format OutputFormat, columns []Column, messages ...proto.
return fmt.Errorf("at least one workflow required for visualization")
}
workflow := workflows[0]
- graphStr, err := visualize.RenderWorkflow(workflow.Closure.CompiledWorkflow)
+ graphStr, err := visualize.RenderWorkflow(workflow.GetClosure().GetCompiledWorkflow())
if err != nil {
return errors.Wrapf("VisualizationError", err, "failed to visualize workflow")
}
diff --git a/flytectl/pkg/printer/printer_test.go b/flytectl/pkg/printer/printer_test.go
index afc5edb7b1..3783e5f7ca 100644
--- a/flytectl/pkg/printer/printer_test.go
+++ b/flytectl/pkg/printer/printer_test.go
@@ -282,7 +282,7 @@ func TestFormatVariableDescriptions(t *testing.T) {
"bar": barVar,
}
FormatVariableDescriptions(variableMap)
- assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", variableMap[DefaultFormattedDescriptionsKey].Description)
+ assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", variableMap[DefaultFormattedDescriptionsKey].GetDescription())
}
func TestFormatParameterDescriptions(t *testing.T) {
@@ -305,5 +305,5 @@ func TestFormatParameterDescriptions(t *testing.T) {
"empty": emptyParam,
}
FormatParameterDescriptions(paramMap)
- assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", paramMap[DefaultFormattedDescriptionsKey].Var.Description)
+ assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", paramMap[DefaultFormattedDescriptionsKey].GetVar().GetDescription())
}
diff --git a/flytectl/pkg/sandbox/start_test.go b/flytectl/pkg/sandbox/start_test.go
index 9d24295758..84a0a4fd35 100644
--- a/flytectl/pkg/sandbox/start_test.go
+++ b/flytectl/pkg/sandbox/start_test.go
@@ -123,7 +123,7 @@ func TestStartFunc(t *testing.T) {
config.DisableAgent = true
assert.Nil(t, util.SetupFlyteDir())
assert.Nil(t, os.MkdirAll(f.FilePathJoin(f.UserHomeDir(), ".flyte", "state"), os.ModePerm))
- assert.Nil(t, ioutil.WriteFile(docker.Kubeconfig, []byte(content), os.ModePerm))
+ assert.Nil(t, os.WriteFile(docker.Kubeconfig, []byte(content), os.ModePerm)) // #nosec G306
fakePod.SetName("flyte")
diff --git a/flytectl/pkg/util/util.go b/flytectl/pkg/util/util.go
index 18067d1702..49b1278c14 100644
--- a/flytectl/pkg/util/util.go
+++ b/flytectl/pkg/util/util.go
@@ -3,7 +3,6 @@ package util
import (
"fmt"
"io"
- "io/ioutil"
"net/http"
"os"
"path/filepath"
@@ -26,7 +25,7 @@ var Ext string
// WriteIntoFile will write content in a file
func WriteIntoFile(data []byte, file string) error {
- err := ioutil.WriteFile(file, data, os.ModePerm)
+ err := os.WriteFile(file, data, os.ModePerm) // #nosec G306
if err != nil {
return err
}
@@ -38,6 +37,7 @@ func CreatePathAndFile(pathToConfig string) error {
if err != nil {
return err
}
+ // #nosec G306
if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil {
return err
}
@@ -45,6 +45,7 @@ func CreatePathAndFile(pathToConfig string) error {
// Created a empty file with right permission
if _, err := os.Stat(p); err != nil {
if os.IsNotExist(err) {
+ // #nosec G306
if err := os.WriteFile(p, []byte(""), os.ModePerm); err != nil {
return err
}
@@ -62,6 +63,7 @@ func SetupFlyteDir() error {
// Created a empty file with right permission
if _, err := os.Stat(docker.Kubeconfig); err != nil {
if os.IsNotExist(err) {
+ // #nosec G306
if err := os.WriteFile(docker.Kubeconfig, []byte(""), os.ModePerm); err != nil {
return err
}
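Two things are happening in this file: `ioutil.WriteFile` has been deprecated since Go 1.16 in favour of `os.WriteFile` (same signature), and gosec's G306 rule expects write permissions of 0600 or tighter, so keeping `os.ModePerm` (0777) requires the `// #nosec G306` markers. A sketch of the lint-clean form for files that do not need to be world-accessible:

```go
package main

import (
	"log"
	"os"
)

func main() {
	data := []byte("hello")
	// 0o600 satisfies gosec G306; os.ModePerm (0o777) would need a
	// "// #nosec G306" annotation like the ones in this diff.
	if err := os.WriteFile("/tmp/example.txt", data, 0o600); err != nil {
		log.Fatal(err)
	}
}
```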
diff --git a/flytectl/pkg/visualize/graphviz.go b/flytectl/pkg/visualize/graphviz.go
index 745c7ad248..be4f275fc5 100644
--- a/flytectl/pkg/visualize/graphviz.go
+++ b/flytectl/pkg/visualize/graphviz.go
@@ -56,11 +56,11 @@ func operandToString(op *core.Operand) string {
}
func comparisonToString(expr *core.ComparisonExpression) string {
- return fmt.Sprintf("%s %s %s", operandToString(expr.LeftValue), expr.Operator.String(), operandToString(expr.RightValue))
+ return fmt.Sprintf("%s %s %s", operandToString(expr.GetLeftValue()), expr.GetOperator().String(), operandToString(expr.GetRightValue()))
}
func conjunctionToString(expr *core.ConjunctionExpression) string {
- return fmt.Sprintf("(%s) %s (%s)", booleanExprToString(expr.LeftExpression), expr.Operator.String(), booleanExprToString(expr.RightExpression))
+ return fmt.Sprintf("(%s) %s (%s)", booleanExprToString(expr.GetLeftExpression()), expr.GetOperator().String(), booleanExprToString(expr.GetRightExpression()))
}
func booleanExprToString(expr *core.BooleanExpression) string {
@@ -86,9 +86,9 @@ func constructEndNode(parentGraph string, n string, graph Graphvizer) (*graphviz
func constructTaskNode(parentGraph string, name string, graph Graphvizer, n *core.Node, t *core.CompiledTask) (*graphviz.Node, error) {
attrs := map[string]string{ShapeType: BoxShape}
- if n.Metadata != nil && n.Metadata.Name != "" {
- v := strings.LastIndexAny(n.Metadata.Name, ".")
- attrs[LabelAttr] = fmt.Sprintf("\"%s [%s]\"", n.Metadata.Name[v+1:], t.Template.Type)
+ if n.GetMetadata() != nil && n.GetMetadata().GetName() != "" {
+ v := strings.LastIndexAny(n.GetMetadata().GetName(), ".")
+ attrs[LabelAttr] = fmt.Sprintf("\"%s [%s]\"", n.GetMetadata().GetName()[v+1:], t.GetTemplate().GetType())
}
tName := strings.ReplaceAll(name, "-", "_")
err := graph.AddNode(parentGraph, tName, attrs)
@@ -104,8 +104,8 @@ func constructErrorNode(parentGraph string, name string, graph Graphvizer, m str
func constructBranchConditionNode(parentGraph string, name string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) {
attrs := map[string]string{ShapeType: DiamondShape}
- if n.Metadata != nil && n.Metadata.Name != "" {
- attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", n.Metadata.Name)
+ if n.GetMetadata() != nil && n.GetMetadata().GetName() != "" {
+ attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", n.GetMetadata().GetName())
}
cName := strings.ReplaceAll(name, "-", "_")
err := graph.AddNode(parentGraph, cName, attrs)
@@ -151,27 +151,27 @@ func (gb *graphBuilder) addBranchSubNodeEdge(graph Graphvizer, parentNode, n *gr
}
func (gb *graphBuilder) constructBranchNode(parentGraph string, prefix string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) {
- parentBranchNode, err := constructBranchConditionNode(parentGraph, getName(prefix, n.Id), graph, n)
+ parentBranchNode, err := constructBranchConditionNode(parentGraph, getName(prefix, n.GetId()), graph, n)
if err != nil {
return nil, err
}
- gb.graphNodes[n.Id] = parentBranchNode
+ gb.graphNodes[n.GetId()] = parentBranchNode
if n.GetBranchNode().GetIfElse() == nil {
return parentBranchNode, nil
}
- subNode, err := gb.constructNode(parentGraph, prefix, graph, n.GetBranchNode().GetIfElse().Case.ThenNode)
+ subNode, err := gb.constructNode(parentGraph, prefix, graph, n.GetBranchNode().GetIfElse().GetCase().GetThenNode())
if err != nil {
return nil, err
}
- if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(n.GetBranchNode().GetIfElse().Case.Condition)); err != nil {
+ if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(n.GetBranchNode().GetIfElse().GetCase().GetCondition())); err != nil {
return nil, err
}
if n.GetBranchNode().GetIfElse().GetError() != nil {
name := fmt.Sprintf("%s-error", parentBranchNode.Name)
- subNode, err := constructErrorNode(prefix, name, graph, n.GetBranchNode().GetIfElse().GetError().Message)
+ subNode, err := constructErrorNode(prefix, name, graph, n.GetBranchNode().GetIfElse().GetError().GetMessage())
if err != nil {
return nil, err
}
@@ -191,11 +191,11 @@ func (gb *graphBuilder) constructBranchNode(parentGraph string, prefix string, g
if n.GetBranchNode().GetIfElse().GetOther() != nil {
for _, c := range n.GetBranchNode().GetIfElse().GetOther() {
- subNode, err := gb.constructNode(parentGraph, prefix, graph, c.ThenNode)
+ subNode, err := gb.constructNode(parentGraph, prefix, graph, c.GetThenNode())
if err != nil {
return nil, err
}
- if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(c.Condition)); err != nil {
+ if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(c.GetCondition())); err != nil {
return nil, err
}
}
@@ -204,18 +204,18 @@ func (gb *graphBuilder) constructBranchNode(parentGraph string, prefix string, g
}
func (gb *graphBuilder) constructNode(parentGraphName string, prefix string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) {
- name := getName(prefix, n.Id)
+ name := getName(prefix, n.GetId())
var err error
var gn *graphviz.Node
- if n.Id == StartNode {
+ if n.GetId() == StartNode {
gn, err = constructStartNode(parentGraphName, strings.ReplaceAll(name, "-", "_"), graph)
gb.nodeClusters[name] = parentGraphName
- } else if n.Id == EndNode {
+ } else if n.GetId() == EndNode {
gn, err = constructEndNode(parentGraphName, strings.ReplaceAll(name, "-", "_"), graph)
gb.nodeClusters[name] = parentGraphName
} else {
- switch n.Target.(type) {
+ switch n.GetTarget().(type) {
case *core.Node_TaskNode:
tID := n.GetTaskNode().GetReferenceId().String()
t, ok := gb.tasks[tID]
@@ -228,7 +228,7 @@ func (gb *graphBuilder) constructNode(parentGraphName string, prefix string, gra
}
gb.nodeClusters[name] = parentGraphName
case *core.Node_BranchNode:
- sanitizedName := strings.ReplaceAll(n.Metadata.Name, "-", "_")
+ sanitizedName := strings.ReplaceAll(n.GetMetadata().GetName(), "-", "_")
branchSubGraphName := SubgraphPrefix + sanitizedName
err := graph.AddSubGraph(parentGraphName, branchSubGraphName, map[string]string{LabelAttr: sanitizedName})
if err != nil {
@@ -269,7 +269,7 @@ func (gb *graphBuilder) constructNode(parentGraphName string, prefix string, gra
if err != nil {
return nil, err
}
- gb.graphNodes[n.Id] = gn
+ gb.graphNodes[n.GetId()] = gn
return gn, nil
}
@@ -298,27 +298,27 @@ func (gb *graphBuilder) addEdge(fromNodeName, toNodeName string, graph Graphvize
}
func (gb *graphBuilder) constructGraph(parentGraphName string, prefix string, graph Graphvizer, w *core.CompiledWorkflow) error {
- if w == nil || w.Template == nil {
+ if w == nil || w.GetTemplate() == nil {
return nil
}
- for _, n := range w.Template.Nodes {
+ for _, n := range w.GetTemplate().GetNodes() {
if _, err := gb.constructNode(parentGraphName, prefix, graph, n); err != nil {
return err
}
}
for name := range gb.graphNodes {
- upstreamNodes := w.Connections.Upstream[name]
- downstreamNodes := w.Connections.Downstream[name]
+ upstreamNodes := w.GetConnections().GetUpstream()[name]
+ downstreamNodes := w.GetConnections().GetDownstream()[name]
if downstreamNodes != nil {
- for _, n := range downstreamNodes.Ids {
+ for _, n := range downstreamNodes.GetIds() {
if err := gb.addEdge(name, n, graph); err != nil {
return err
}
}
}
if upstreamNodes != nil {
- for _, n := range upstreamNodes.Ids {
+ for _, n := range upstreamNodes.GetIds() {
if err := gb.addEdge(n, name, graph); err != nil {
return err
}
@@ -334,23 +334,23 @@ func (gb *graphBuilder) CompiledWorkflowClosureToGraph(w *core.CompiledWorkflowC
_ = dotGraph.SetStrict(true)
tLookup := make(map[string]*core.CompiledTask)
- for _, t := range w.Tasks {
- if t.Template == nil || t.Template.Id == nil {
+ for _, t := range w.GetTasks() {
+ if t.GetTemplate() == nil || t.GetTemplate().GetId() == nil {
return FlyteGraph{}, fmt.Errorf("no template found in the workflow task %v", t)
}
- tLookup[t.Template.Id.String()] = t
+ tLookup[t.GetTemplate().GetId().String()] = t
}
gb.tasks = tLookup
wLookup := make(map[string]*core.CompiledWorkflow)
- for _, swf := range w.SubWorkflows {
- if swf.Template == nil || swf.Template.Id == nil {
+ for _, swf := range w.GetSubWorkflows() {
+ if swf.GetTemplate() == nil || swf.GetTemplate().GetId() == nil {
return FlyteGraph{}, fmt.Errorf("no template found in the sub workflow %v", swf)
}
- wLookup[swf.Template.Id.String()] = swf
+ wLookup[swf.GetTemplate().GetId().String()] = swf
}
gb.subWf = wLookup
- return dotGraph, gb.constructGraph("", "", dotGraph, w.Primary)
+ return dotGraph, gb.constructGraph("", "", dotGraph, w.GetPrimary())
}
func newGraphBuilder() *graphBuilder {
diff --git a/flyteidl/.golangci.yml b/flyteidl/.golangci.yml
index 7714cbe5a3..8eda34cffe 100644
--- a/flyteidl/.golangci.yml
+++ b/flyteidl/.golangci.yml
@@ -1,31 +1,22 @@
-# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES.
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY:
-#
-# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst
-
run:
skip-dirs:
- - pkg/client
+ - pkg/client
- gen
-
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- - gas
+ - gosec
- goconst
- goimports
- - golint
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- staticcheck
- - structcheck
- typecheck
- unconvert
- unparam
- unused
- - varcheck
+ - protogetter
diff --git a/flyteidl/clients/go/admin/auth_interceptor.go b/flyteidl/clients/go/admin/auth_interceptor.go
index 221dd98e9b..802db2cb0e 100644
--- a/flyteidl/clients/go/admin/auth_interceptor.go
+++ b/flyteidl/clients/go/admin/auth_interceptor.go
@@ -106,7 +106,7 @@ func setHTTPClientContext(ctx context.Context, cfg *Config, proxyCredentialsFutu
transport.Proxy = http.ProxyURL(&cfg.HTTPProxyURL.URL)
}
- if cfg.ProxyCommand != nil && len(cfg.ProxyCommand) > 0 {
+ if len(cfg.ProxyCommand) > 0 {
httpClient.Transport = &proxyAuthTransport{
transport: transport,
proxyCredentialsFuture: proxyCredentialsFuture,
@@ -143,7 +143,7 @@ func (o *OauthMetadataProvider) getTokenSourceAndMetadata(cfg *Config, tokenCach
if err != nil {
return fmt.Errorf("failed to fetch client metadata. Error: %v", err)
}
- authorizationMetadataKey = clientMetadata.AuthorizationMetadataKey
+ authorizationMetadataKey = clientMetadata.GetAuthorizationMetadataKey()
}
tokenSource, err := tokenSourceProvider.GetTokenSource(ctx)
@@ -238,7 +238,7 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut
if newErr != nil {
errString := fmt.Sprintf("authentication error! Original Error: %v, Auth Error: %v", err, newErr)
logger.Errorf(ctx, errString)
- return fmt.Errorf(errString)
+ return fmt.Errorf(errString) //nolint
}
tokenCache.CondBroadcast()
diff --git a/flyteidl/clients/go/admin/client.go b/flyteidl/clients/go/admin/client.go
index 9f14d49dee..6f6b5d46fb 100644
--- a/flyteidl/clients/go/admin/client.go
+++ b/flyteidl/clients/go/admin/client.go
@@ -73,7 +73,7 @@ func GetAdditionalAdminClientConfigOptions(cfg *Config) []grpc.DialOption {
opts = append(opts, grpc.WithBackoffConfig(backoffConfig))
timeoutDialOption := grpcRetry.WithPerRetryTimeout(cfg.PerRetryTimeout.Duration)
- maxRetriesOption := grpcRetry.WithMax(uint(cfg.MaxRetries))
+ maxRetriesOption := grpcRetry.WithMax(uint(cfg.MaxRetries)) // #nosec G115
retryInterceptor := grpcRetry.UnaryClientInterceptor(timeoutDialOption, maxRetriesOption)
// We only make unary calls in this client, no streaming calls. We can add a streaming interceptor if admin
@@ -101,7 +101,7 @@ func getAuthenticationDialOption(ctx context.Context, cfg *Config, tokenSourcePr
if err != nil {
return nil, fmt.Errorf("failed to fetch client metadata. Error: %v", err)
}
- authorizationMetadataKey = clientMetadata.AuthorizationMetadataKey
+ authorizationMetadataKey = clientMetadata.GetAuthorizationMetadataKey()
}
tokenSource, err := tokenSourceProvider.GetTokenSource(ctx)
@@ -157,7 +157,7 @@ func NewAdminConnection(ctx context.Context, cfg *Config, proxyCredentialsFuture
opts = append(opts, GetAdditionalAdminClientConfigOptions(cfg)...)
- if cfg.ProxyCommand != nil && len(cfg.ProxyCommand) > 0 {
+ if len(cfg.ProxyCommand) > 0 {
opts = append(opts, grpc.WithChainUnaryInterceptor(NewProxyAuthInterceptor(cfg, proxyCredentialsFuture)))
opts = append(opts, grpc.WithPerRPCCredentials(proxyCredentialsFuture))
}
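Dropping the `cfg.ProxyCommand != nil` guard here is safe because `len` of a nil slice is defined to be 0 in Go, so `len(cfg.ProxyCommand) > 0` alone covers both cases; this is the redundancy gosimple's S1009 check points out. A minimal sketch:

```go
package main

import "fmt"

func main() {
	var cmd []string // nil slice
	fmt.Println(len(cmd) > 0)               // false: len(nil) == 0
	fmt.Println(cmd != nil && len(cmd) > 0) // equivalent, nil check is redundant
}
```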
diff --git a/flyteidl/clients/go/admin/oauth/config.go b/flyteidl/clients/go/admin/oauth/config.go
index f0a8b9afa4..94055f678a 100644
--- a/flyteidl/clients/go/admin/oauth/config.go
+++ b/flyteidl/clients/go/admin/oauth/config.go
@@ -30,16 +30,16 @@ func BuildConfigFromMetadataService(ctx context.Context, authMetadataClient serv
clientConf = &Config{
Config: &oauth2.Config{
- ClientID: clientResp.ClientId,
- RedirectURL: clientResp.RedirectUri,
- Scopes: clientResp.Scopes,
+ ClientID: clientResp.GetClientId(),
+ RedirectURL: clientResp.GetRedirectUri(),
+ Scopes: clientResp.GetScopes(),
Endpoint: oauth2.Endpoint{
- TokenURL: oauthMetaResp.TokenEndpoint,
- AuthURL: oauthMetaResp.AuthorizationEndpoint,
+ TokenURL: oauthMetaResp.GetTokenEndpoint(),
+ AuthURL: oauthMetaResp.GetAuthorizationEndpoint(),
},
},
- DeviceEndpoint: oauthMetaResp.DeviceAuthorizationEndpoint,
- Audience: clientResp.Audience,
+ DeviceEndpoint: oauthMetaResp.GetDeviceAuthorizationEndpoint(),
+ Audience: clientResp.GetAudience(),
}
return clientConf, nil
diff --git a/flyteidl/clients/go/admin/token_source_provider.go b/flyteidl/clients/go/admin/token_source_provider.go
index 2a51832da6..b0ab0ce3e1 100644
--- a/flyteidl/clients/go/admin/token_source_provider.go
+++ b/flyteidl/clients/go/admin/token_source_provider.go
@@ -54,7 +54,7 @@ func NewTokenSourceProvider(ctx context.Context, cfg *Config, tokenCache cache.T
return nil, fmt.Errorf("failed to fetch auth metadata. Error: %v", err)
}
- tokenURL = metadata.TokenEndpoint
+ tokenURL = metadata.GetTokenEndpoint()
}
scopes := cfg.Scopes
@@ -67,11 +67,11 @@ func NewTokenSourceProvider(ctx context.Context, cfg *Config, tokenCache cache.T
}
// Update scopes from publicClientConfig
if len(scopes) == 0 {
- scopes = publicClientConfig.Scopes
+ scopes = publicClientConfig.GetScopes()
}
// Update audience from publicClientConfig
if cfg.UseAudienceFromAdmin {
- audienceValue = publicClientConfig.Audience
+ audienceValue = publicClientConfig.GetAudience()
}
}
diff --git a/flyteidl/clients/go/coreutils/extract_literal.go b/flyteidl/clients/go/coreutils/extract_literal.go
index 23302de9a3..08e534c5b7 100644
--- a/flyteidl/clients/go/coreutils/extract_literal.go
+++ b/flyteidl/clients/go/coreutils/extract_literal.go
@@ -28,11 +28,11 @@ import (
)
func ExtractFromLiteral(literal *core.Literal) (interface{}, error) {
- switch literalValue := literal.Value.(type) {
+ switch literalValue := literal.GetValue().(type) {
case *core.Literal_Scalar:
- switch scalarValue := literalValue.Scalar.Value.(type) {
+ switch scalarValue := literalValue.Scalar.GetValue().(type) {
case *core.Scalar_Primitive:
- switch scalarPrimitive := scalarValue.Primitive.Value.(type) {
+ switch scalarPrimitive := scalarValue.Primitive.GetValue().(type) {
case *core.Primitive_Integer:
scalarPrimitiveInt := scalarPrimitive.Integer
return scalarPrimitiveInt, nil
@@ -57,16 +57,16 @@ func ExtractFromLiteral(literal *core.Literal) (interface{}, error) {
case *core.Scalar_Binary:
return scalarValue.Binary, nil
case *core.Scalar_Blob:
- return scalarValue.Blob.Uri, nil
+ return scalarValue.Blob.GetUri(), nil
case *core.Scalar_Schema:
- return scalarValue.Schema.Uri, nil
+ return scalarValue.Schema.GetUri(), nil
case *core.Scalar_Generic:
return scalarValue.Generic, nil
case *core.Scalar_StructuredDataset:
- return scalarValue.StructuredDataset.Uri, nil
+ return scalarValue.StructuredDataset.GetUri(), nil
case *core.Scalar_Union:
// extract the value of the union but not the actual union object
- extractedVal, err := ExtractFromLiteral(scalarValue.Union.Value)
+ extractedVal, err := ExtractFromLiteral(scalarValue.Union.GetValue())
if err != nil {
return nil, err
}
@@ -77,7 +77,7 @@ func ExtractFromLiteral(literal *core.Literal) (interface{}, error) {
return nil, fmt.Errorf("unsupported literal scalar type %T", scalarValue)
}
case *core.Literal_Collection:
- collectionValue := literalValue.Collection.Literals
+ collectionValue := literalValue.Collection.GetLiterals()
collection := make([]interface{}, len(collectionValue))
for index, val := range collectionValue {
if collectionElem, err := ExtractFromLiteral(val); err == nil {
@@ -88,7 +88,7 @@ func ExtractFromLiteral(literal *core.Literal) (interface{}, error) {
}
return collection, nil
case *core.Literal_Map:
- mapLiteralValue := literalValue.Map.Literals
+ mapLiteralValue := literalValue.Map.GetLiterals()
mapResult := make(map[string]interface{}, len(mapLiteralValue))
for key, val := range mapLiteralValue {
if val, err := ExtractFromLiteral(val); err == nil {
@@ -100,7 +100,7 @@ func ExtractFromLiteral(literal *core.Literal) (interface{}, error) {
return mapResult, nil
case *core.Literal_OffloadedMetadata:
// Return the URI of the offloaded metadata to be used when displaying in flytectl
- return literalValue.OffloadedMetadata.Uri, nil
+ return literalValue.OffloadedMetadata.GetUri(), nil
}
return nil, fmt.Errorf("unsupported literal type %T", literal)
diff --git a/flyteidl/clients/go/coreutils/extract_literal_test.go b/flyteidl/clients/go/coreutils/extract_literal_test.go
index 0cd4c2fb16..9d6e035775 100644
--- a/flyteidl/clients/go/coreutils/extract_literal_test.go
+++ b/flyteidl/clients/go/coreutils/extract_literal_test.go
@@ -148,9 +148,9 @@ func TestFetchLiteral(t *testing.T) {
Fields: fieldsMap,
}
extractedStructValue := extractedLiteralVal.(*structpb.Struct)
- assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields))
- for key, val := range expectedStructVal.Fields {
- assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind)
+ assert.Equal(t, len(expectedStructVal.GetFields()), len(extractedStructValue.GetFields()))
+ for key, val := range expectedStructVal.GetFields() {
+ assert.Equal(t, val.GetKind(), extractedStructValue.GetFields()[key].GetKind())
}
os.Unsetenv(FlyteUseOldDcFormat)
})
@@ -174,9 +174,9 @@ func TestFetchLiteral(t *testing.T) {
Fields: fieldsMap,
}
extractedStructValue := extractedLiteralVal.(*structpb.Struct)
- assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields))
- for key, val := range expectedStructVal.Fields {
- assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind)
+ assert.Equal(t, len(expectedStructVal.GetFields()), len(extractedStructValue.GetFields()))
+ for key, val := range expectedStructVal.GetFields() {
+ assert.Equal(t, val.GetKind(), extractedStructValue.GetFields()[key].GetKind())
}
})
diff --git a/flyteidl/clients/go/coreutils/literals.go b/flyteidl/clients/go/coreutils/literals.go
index 6f292d7118..310e389c73 100644
--- a/flyteidl/clients/go/coreutils/literals.go
+++ b/flyteidl/clients/go/coreutils/literals.go
@@ -306,20 +306,20 @@ func MakeDefaultLiteralForType(typ *core.LiteralType) (*core.Literal, error) {
case *core.LiteralType_Schema:
return MakeLiteralForType(typ, nil)
case *core.LiteralType_UnionType:
- if len(t.UnionType.Variants) == 0 {
+ if len(t.UnionType.GetVariants()) == 0 {
return nil, errors.Errorf("Union type must have at least one variant")
}
// For union types, we just return the default for the first variant
- val, err := MakeDefaultLiteralForType(t.UnionType.Variants[0])
+ val, err := MakeDefaultLiteralForType(t.UnionType.GetVariants()[0])
if err != nil {
- return nil, errors.Errorf("Failed to create default literal for first union type variant [%v]", t.UnionType.Variants[0])
+ return nil, errors.Errorf("Failed to create default literal for first union type variant [%v]", t.UnionType.GetVariants()[0])
}
res := &core.Literal{
Value: &core.Literal_Scalar{
Scalar: &core.Scalar{
Value: &core.Scalar_Union{
Union: &core.Union{
- Type: t.UnionType.Variants[0],
+ Type: t.UnionType.GetVariants()[0],
Value: val,
},
},
@@ -511,7 +511,7 @@ func MakeLiteralForBlob(path storage.DataReference, isDir bool, format string) *
func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, error) {
l := &core.Literal{}
- switch newT := t.Type.(type) {
+ switch newT := t.GetType().(type) {
case *core.LiteralType_MapValueType:
newV, ok := v.(map[string]interface{})
if !ok {
@@ -600,24 +600,24 @@ func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, erro
return lv, nil
case *core.LiteralType_Blob:
- isDir := newT.Blob.Dimensionality == core.BlobType_MULTIPART
- lv := MakeLiteralForBlob(storage.DataReference(fmt.Sprintf("%v", v)), isDir, newT.Blob.Format)
+ isDir := newT.Blob.GetDimensionality() == core.BlobType_MULTIPART
+ lv := MakeLiteralForBlob(storage.DataReference(fmt.Sprintf("%v", v)), isDir, newT.Blob.GetFormat())
return lv, nil
case *core.LiteralType_Schema:
- lv := MakeLiteralForSchema(storage.DataReference(fmt.Sprintf("%v", v)), newT.Schema.Columns)
+ lv := MakeLiteralForSchema(storage.DataReference(fmt.Sprintf("%v", v)), newT.Schema.GetColumns())
return lv, nil
case *core.LiteralType_StructuredDatasetType:
- lv := MakeLiteralForStructuredDataSet(storage.DataReference(fmt.Sprintf("%v", v)), newT.StructuredDatasetType.Columns, newT.StructuredDatasetType.Format)
+ lv := MakeLiteralForStructuredDataSet(storage.DataReference(fmt.Sprintf("%v", v)), newT.StructuredDatasetType.GetColumns(), newT.StructuredDatasetType.GetFormat())
return lv, nil
case *core.LiteralType_EnumType:
var newV string
if v == nil {
- if len(t.GetEnumType().Values) == 0 {
+ if len(t.GetEnumType().GetValues()) == 0 {
return nil, fmt.Errorf("enum types need at least one value")
}
- newV = t.GetEnumType().Values[0]
+ newV = t.GetEnumType().GetValues()[0]
} else {
var ok bool
newV, ok = v.(string)
@@ -640,7 +640,7 @@ func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, erro
case *core.LiteralType_UnionType:
// Try different types in the variants, return the first one matched
found := false
- for _, subType := range newT.UnionType.Variants {
+ for _, subType := range newT.UnionType.GetVariants() {
lv, err := MakeLiteralForType(subType, v)
if err == nil {
l = &core.Literal{
@@ -660,7 +660,7 @@ func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, erro
}
}
if !found {
- return nil, fmt.Errorf("incorrect union value [%s], supported values %+v", v, newT.UnionType.Variants)
+ return nil, fmt.Errorf("incorrect union value [%s], supported values %+v", v, newT.UnionType.GetVariants())
}
default:
return nil, fmt.Errorf("unsupported type %s", t.String())
diff --git a/flyteidl/clients/go/coreutils/literals_test.go b/flyteidl/clients/go/coreutils/literals_test.go
index f2d8c9e5b2..3586e0e02f 100644
--- a/flyteidl/clients/go/coreutils/literals_test.go
+++ b/flyteidl/clients/go/coreutils/literals_test.go
@@ -27,42 +27,42 @@ func TestMakePrimitive(t *testing.T) {
v := 1
p, err := MakePrimitive(v)
assert.NoError(t, err)
- assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.Value).String())
+ assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetValue()).String())
assert.Equal(t, int64(v), p.GetInteger())
}
{
v := int64(1)
p, err := MakePrimitive(v)
assert.NoError(t, err)
- assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.Value).String())
+ assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetValue()).String())
assert.Equal(t, v, p.GetInteger())
}
{
v := 1.0
p, err := MakePrimitive(v)
assert.NoError(t, err)
- assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.Value).String())
+ assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetValue()).String())
assert.Equal(t, v, p.GetFloatValue())
}
{
v := "blah"
p, err := MakePrimitive(v)
assert.NoError(t, err)
- assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.Value).String())
+ assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.GetValue()).String())
assert.Equal(t, v, p.GetStringValue())
}
{
v := true
p, err := MakePrimitive(v)
assert.NoError(t, err)
- assert.Equal(t, "*core.Primitive_Boolean", reflect.TypeOf(p.Value).String())
+ assert.Equal(t, "*core.Primitive_Boolean", reflect.TypeOf(p.GetValue()).String())
assert.Equal(t, v, p.GetBoolean())
}
{
v := time.Now()
p, err := MakePrimitive(v)
assert.NoError(t, err)
- assert.Equal(t, "*core.Primitive_Datetime", reflect.TypeOf(p.Value).String())
+ assert.Equal(t, "*core.Primitive_Datetime", reflect.TypeOf(p.GetValue()).String())
j, err := ptypes.TimestampProto(v)
assert.NoError(t, err)
assert.Equal(t, j, p.GetDatetime())
@@ -73,7 +73,7 @@ func TestMakePrimitive(t *testing.T) {
v := time.Second * 10
p, err := MakePrimitive(v)
assert.NoError(t, err)
- assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.Value).String())
+ assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.GetValue()).String())
assert.Equal(t, ptypes.DurationProto(v), p.GetDuration())
}
{
@@ -95,7 +95,7 @@ func TestMustMakePrimitive(t *testing.T) {
{
v := time.Second * 10
p := MustMakePrimitive(v)
- assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.Value).String())
+ assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.GetValue()).String())
assert.Equal(t, ptypes.DurationProto(v), p.GetDuration())
}
}
@@ -106,7 +106,7 @@ func TestMakePrimitiveLiteral(t *testing.T) {
p, err := MakePrimitiveLiteral(v)
assert.NoError(t, err)
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue())
}
{
@@ -129,7 +129,7 @@ func TestMustMakePrimitiveLiteral(t *testing.T) {
v := 1.0
p := MustMakePrimitiveLiteral(v)
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue())
})
}
@@ -138,14 +138,14 @@ func TestMakeLiteral(t *testing.T) {
t.Run("Primitive", func(t *testing.T) {
lit, err := MakeLiteral("test_string")
assert.NoError(t, err)
- assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(lit.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(lit.GetScalar().GetPrimitive().GetValue()).String())
})
t.Run("Array", func(t *testing.T) {
lit, err := MakeLiteral([]interface{}{1, 2, 3})
assert.NoError(t, err)
assert.Equal(t, "*core.Literal_Collection", reflect.TypeOf(lit.GetValue()).String())
- assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(lit.GetCollection().Literals[0].GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(lit.GetCollection().GetLiterals()[0].GetScalar().GetPrimitive().GetValue()).String())
})
t.Run("Map", func(t *testing.T) {
@@ -155,7 +155,7 @@ func TestMakeLiteral(t *testing.T) {
})
assert.NoError(t, err)
assert.Equal(t, "*core.Literal_Map", reflect.TypeOf(lit.GetValue()).String())
- assert.Equal(t, "*core.Literal_Collection", reflect.TypeOf(lit.GetMap().Literals["key1"].GetValue()).String())
+ assert.Equal(t, "*core.Literal_Collection", reflect.TypeOf(lit.GetMap().GetLiterals()["key1"].GetValue()).String())
})
t.Run("Binary", func(t *testing.T) {
@@ -167,7 +167,7 @@ func TestMakeLiteral(t *testing.T) {
p, err := MakeLiteral(nil)
assert.NoError(t, err)
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Scalar_NoneType", reflect.TypeOf(p.GetScalar().Value).String())
+ assert.Equal(t, "*core.Scalar_NoneType", reflect.TypeOf(p.GetScalar().GetValue()).String())
})
}
@@ -205,9 +205,9 @@ func TestMakeDefaultLiteralForType(t *testing.T) {
l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Simple{Simple: test.ty}})
assert.NoError(t, err)
if test.isPrimitive {
- assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().GetPrimitive().GetValue()).String())
} else {
- assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().Value).String())
+ assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().GetValue()).String())
}
})
}
@@ -221,7 +221,7 @@ func TestMakeDefaultLiteralForType(t *testing.T) {
t.Run("Blob", func(t *testing.T) {
l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Blob{}})
assert.NoError(t, err)
- assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().Value).String())
+ assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().GetValue()).String())
})
t.Run("Collection", func(t *testing.T) {
@@ -300,7 +300,7 @@ func TestMustMakeDefaultLiteralForType(t *testing.T) {
t.Run("Blob", func(t *testing.T) {
l := MustMakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Blob{}})
- assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().Value).String())
+ assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().GetValue()).String())
})
}
@@ -479,9 +479,9 @@ func TestMakeLiteralForType(t *testing.T) {
Fields: fieldsMap,
}
extractedStructValue := extractedLiteralVal.(*structpb.Struct)
- assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields))
- for key, val := range expectedStructVal.Fields {
- assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind)
+ assert.Equal(t, len(expectedStructVal.GetFields()), len(extractedStructValue.GetFields()))
+ for key, val := range expectedStructVal.GetFields() {
+ assert.Equal(t, val.GetKind(), extractedStructValue.GetFields()[key].GetKind())
}
os.Unsetenv(FlyteUseOldDcFormat)
})
@@ -539,9 +539,9 @@ func TestMakeLiteralForType(t *testing.T) {
// Now check if the Binary values match
var expectedVal, actualVal map[string]interface{}
- err = msgpack.Unmarshal(expectedBinary.Value, &expectedVal)
+ err = msgpack.Unmarshal(expectedBinary.GetValue(), &expectedVal)
assert.NoError(t, err)
- err = msgpack.Unmarshal(actualBinary.Value, &actualVal)
+ err = msgpack.Unmarshal(actualBinary.GetValue(), &actualVal)
assert.NoError(t, err)
// Finally, assert that the deserialized values are equal
diff --git a/flyteplugins/.golangci.yml b/flyteplugins/.golangci.yml
index 6d13f4a3b6..9b6ab1e86d 100644
--- a/flyteplugins/.golangci.yml
+++ b/flyteplugins/.golangci.yml
@@ -1,35 +1,25 @@
-# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES.
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY:
-#
-# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst
-
run:
skip-dirs:
- pkg/client
-
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- - gas
+ - gosec
- gci
- goconst
- goimports
- - golint
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- staticcheck
- - structcheck
- typecheck
- unconvert
- unparam
- unused
- - varcheck
-
+ - protogetter
linters-settings:
gci:
custom-order: true
diff --git a/flyteplugins/go/tasks/logs/logging_utils.go b/flyteplugins/go/tasks/logs/logging_utils.go
index 4bfff0dd17..1b442fd766 100644
--- a/flyteplugins/go/tasks/logs/logging_utils.go
+++ b/flyteplugins/go/tasks/logs/logging_utils.go
@@ -24,12 +24,14 @@ func GetLogsForContainerInPod(ctx context.Context, logPlugin tasklog.Plugin, tas
return nil, nil
}
+ // #nosec G115
if uint32(len(pod.Spec.Containers)) <= index {
logger.Errorf(ctx, "container IndexOutOfBound, requested [%d], but total containers [%d] in pod phase [%v]", index, len(pod.Spec.Containers), pod.Status.Phase)
return nil, nil
}
containerID := v1.ContainerStatus{}.ContainerID
+ // #nosec G115
if uint32(len(pod.Status.ContainerStatuses)) <= index {
msg := fmt.Sprintf("containerStatus IndexOutOfBound, requested [%d], but total containerStatuses [%d] in pod phase [%v]", index, len(pod.Status.ContainerStatuses), pod.Status.Phase)
if pod.Status.Phase == v1.PodPending {
diff --git a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go
index a36edf20ea..0447b488cd 100644
--- a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go
+++ b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go
@@ -31,7 +31,7 @@ func (h failFastHandler) Handle(ctx context.Context, tCtx core.TaskExecutionCont
}
return core.DoTransition(core.PhaseInfoFailure("AlwaysFail",
fmt.Sprintf("Task [%s] type [%+v] not supported by platform for this project/domain/workflow",
- taskTemplate.Type, tCtx.TaskExecutionMetadata().GetTaskExecutionID()), &core.TaskInfo{
+ taskTemplate.GetType(), tCtx.TaskExecutionMetadata().GetTaskExecutionID()), &core.TaskInfo{
OccurredAt: &occuredAt,
})), nil
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go
index c302db8c32..bf856290d1 100644
--- a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go
+++ b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go
@@ -48,8 +48,8 @@ func TestHandleAlwaysFails(t *testing.T) {
transition, err := testHandler.Handle(context.TODO(), taskCtx)
assert.NoError(t, err)
assert.Equal(t, core.PhasePermanentFailure, transition.Info().Phase())
- assert.Equal(t, "AlwaysFail", transition.Info().Err().Code)
- assert.Contains(t, transition.Info().Err().Message, "Task [unsupportedtype]")
+ assert.Equal(t, "AlwaysFail", transition.Info().Err().GetCode())
+ assert.Contains(t, transition.Info().Err().GetMessage(), "Task [unsupportedtype]")
}
func TestAbort(t *testing.T) {
diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go
index c056989905..224cfd612e 100644
--- a/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go
+++ b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go
@@ -41,7 +41,7 @@ func consistentHash(str string) (string, error) {
func hashInputs(ctx context.Context, key Key) (string, error) {
inputs := &core.LiteralMap{}
- if key.TypedInterface.Inputs != nil {
+ if key.TypedInterface.GetInputs() != nil {
retInputs, err := key.InputReader.Get(ctx)
if err != nil {
return "", err
@@ -88,7 +88,7 @@ func (c AsyncClientImpl) Download(ctx context.Context, requests ...DownloadReque
}
if readerWorkItem.IsCached() {
- cachedResults.Set(uint(idx))
+ cachedResults.Set(uint(idx)) // #nosec G115
cachedCount++
}
case workqueue.WorkStatusFailed:
diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go b/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go
index 15a4347351..7bdb435761 100644
--- a/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go
+++ b/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go
@@ -49,11 +49,11 @@ func TestStatus(t *testing.T) {
status := NewStatus(cacheStatus, &catalogMetadata)
assert.Equal(t, status.GetCacheStatus(), cacheStatus)
- assert.Equal(t, status.GetMetadata().DatasetId.Project, catalogMetadata.DatasetId.Project)
- assert.Equal(t, status.GetMetadata().DatasetId.Domain, catalogMetadata.DatasetId.Domain)
- assert.Equal(t, status.GetMetadata().DatasetId.Name, catalogMetadata.DatasetId.Name)
- assert.Equal(t, status.GetMetadata().ArtifactTag.ArtifactId, catalogMetadata.ArtifactTag.ArtifactId)
- assert.Equal(t, status.GetMetadata().ArtifactTag.Name, catalogMetadata.ArtifactTag.Name)
+ assert.Equal(t, status.GetMetadata().GetDatasetId().GetProject(), catalogMetadata.GetDatasetId().GetProject())
+ assert.Equal(t, status.GetMetadata().GetDatasetId().GetDomain(), catalogMetadata.GetDatasetId().GetDomain())
+ assert.Equal(t, status.GetMetadata().GetDatasetId().GetName(), catalogMetadata.GetDatasetId().GetName())
+ assert.Equal(t, status.GetMetadata().GetArtifactTag().GetArtifactId(), catalogMetadata.GetArtifactTag().GetArtifactId())
+ assert.Equal(t, status.GetMetadata().GetArtifactTag().GetName(), catalogMetadata.GetArtifactTag().GetName())
}
func TestEntry(t *testing.T) {
@@ -75,11 +75,11 @@ func TestEntry(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
status := tt.entry.GetStatus()
assert.Equal(t, status.GetCacheStatus(), cacheStatus)
- assert.Equal(t, status.GetMetadata().DatasetId.Project, catalogMetadata.DatasetId.Project)
- assert.Equal(t, status.GetMetadata().DatasetId.Domain, catalogMetadata.DatasetId.Domain)
- assert.Equal(t, status.GetMetadata().DatasetId.Name, catalogMetadata.DatasetId.Name)
- assert.Equal(t, status.GetMetadata().ArtifactTag.ArtifactId, catalogMetadata.ArtifactTag.ArtifactId)
- assert.Equal(t, status.GetMetadata().ArtifactTag.Name, catalogMetadata.ArtifactTag.Name)
+ assert.Equal(t, status.GetMetadata().GetDatasetId().GetProject(), catalogMetadata.GetDatasetId().GetProject())
+ assert.Equal(t, status.GetMetadata().GetDatasetId().GetDomain(), catalogMetadata.GetDatasetId().GetDomain())
+ assert.Equal(t, status.GetMetadata().GetDatasetId().GetName(), catalogMetadata.GetDatasetId().GetName())
+ assert.Equal(t, status.GetMetadata().GetArtifactTag().GetArtifactId(), catalogMetadata.GetArtifactTag().GetArtifactId())
+ assert.Equal(t, status.GetMetadata().GetArtifactTag().GetName(), catalogMetadata.GetArtifactTag().GetName())
})
}
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go b/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go
index 4cc2fbd5cd..7dda4afa97 100644
--- a/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go
+++ b/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go
@@ -27,7 +27,7 @@ func hashify(literal *core.Literal) *core.Literal {
// 1. A collection of literals or
// 2. A map of literals
if literal.GetCollection() != nil {
- literals := literal.GetCollection().Literals
+ literals := literal.GetCollection().GetLiterals()
literalsHash := make([]*core.Literal, 0)
for _, lit := range literals {
literalsHash = append(literalsHash, hashify(lit))
@@ -42,7 +42,7 @@ func hashify(literal *core.Literal) *core.Literal {
}
if literal.GetMap() != nil {
literalsMap := make(map[string]*core.Literal)
- for key, lit := range literal.GetMap().Literals {
+ for key, lit := range literal.GetMap().GetLiterals() {
literalsMap[key] = hashify(lit)
}
return &core.Literal{
@@ -58,14 +58,14 @@ func hashify(literal *core.Literal) *core.Literal {
}
func HashLiteralMap(ctx context.Context, literalMap *core.LiteralMap, cacheIgnoreInputVars []string) (string, error) {
- if literalMap == nil || len(literalMap.Literals) == 0 {
+ if literalMap == nil || len(literalMap.GetLiterals()) == 0 {
literalMap = &emptyLiteralMap
}
// Hashify, i.e. generate a copy of the literal map where each literal value is removed
// in case the corresponding hash is set.
- hashifiedLiteralMap := make(map[string]*core.Literal, len(literalMap.Literals))
- for name, literal := range literalMap.Literals {
+ hashifiedLiteralMap := make(map[string]*core.Literal, len(literalMap.GetLiterals()))
+ for name, literal := range literalMap.GetLiterals() {
if !slices.Contains(cacheIgnoreInputVars, name) {
hashifiedLiteralMap[name] = hashify(literal)
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go b/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go
index 5969d44661..9a020bd188 100644
--- a/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go
+++ b/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go
@@ -27,7 +27,7 @@ type TaskExecutionID interface {
GetGeneratedNameWith(minLength, maxLength int) (string, error)
// GetID returns the underlying idl task identifier.
- GetID() core.TaskExecutionIdentifier
+ GetID() core.TaskExecutionIdentifier // TODO (whynopointer)
// GetUniqueNodeID returns the fully-qualified Node ID that is unique within a
// given workflow execution.
@@ -48,7 +48,7 @@ type TaskExecutionMetadata interface {
GetMaxAttempts() uint32
GetAnnotations() map[string]string
GetK8sServiceAccount() string
- GetSecurityContext() core.SecurityContext
+ GetSecurityContext() core.SecurityContext // TODO (whynopointer)
IsInterruptible() bool
GetPlatformResources() *v1.ResourceRequirements
GetInterruptibleFailureThreshold() int32
diff --git a/flyteplugins/go/tasks/pluginmachinery/core/template/template.go b/flyteplugins/go/tasks/pluginmachinery/core/template/template.go
index 5aea60c4b9..9192cf851c 100644
--- a/flyteplugins/go/tasks/pluginmachinery/core/template/template.go
+++ b/flyteplugins/go/tasks/pluginmachinery/core/template/template.go
@@ -162,7 +162,7 @@ func render(ctx context.Context, inputTemplate string, params Parameters, perRet
}
func transformVarNameToStringVal(ctx context.Context, varName string, inputs *idlCore.LiteralMap) (string, error) {
- inputVal, exists := inputs.Literals[varName]
+ inputVal, exists := inputs.GetLiterals()[varName]
if !exists {
return "", fmt.Errorf("requested input is not found [%s]", varName)
}
@@ -175,7 +175,7 @@ func transformVarNameToStringVal(ctx context.Context, varName string, inputs *id
}
func serializePrimitive(p *idlCore.Primitive) (string, error) {
- switch o := p.Value.(type) {
+ switch o := p.GetValue().(type) {
case *idlCore.Primitive_Integer:
return fmt.Sprintf("%v", o.Integer), nil
case *idlCore.Primitive_Boolean:
@@ -189,22 +189,22 @@ func serializePrimitive(p *idlCore.Primitive) (string, error) {
case *idlCore.Primitive_StringValue:
return o.StringValue, nil
default:
- return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(p.Value))
+ return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(p.GetValue()))
}
}
func serializeLiteralScalar(l *idlCore.Scalar) (string, error) {
- switch o := l.Value.(type) {
+ switch o := l.GetValue().(type) {
case *idlCore.Scalar_Primitive:
return serializePrimitive(o.Primitive)
case *idlCore.Scalar_Blob:
- return o.Blob.Uri, nil
+ return o.Blob.GetUri(), nil
case *idlCore.Scalar_Schema:
- return o.Schema.Uri, nil
+ return o.Schema.GetUri(), nil
case *idlCore.Scalar_Binary:
- binaryBytes := o.Binary.Value
+ binaryBytes := o.Binary.GetValue()
var currVal any
- if o.Binary.Tag == coreutils.MESSAGEPACK {
+ if o.Binary.GetTag() == coreutils.MESSAGEPACK {
err := msgpack.Unmarshal(binaryBytes, &currVal)
if err != nil {
return "", fmt.Errorf("failed to unmarshal messagepack bytes with literal:[%v], err:[%v]", l, err)
@@ -212,18 +212,18 @@ func serializeLiteralScalar(l *idlCore.Scalar) (string, error) {
// TODO: Try to support Primitive_Datetime, Primitive_Duration, Flyte File, and Flyte Directory.
return fmt.Sprintf("%v", currVal), nil
}
- return "", fmt.Errorf("unsupported binary tag [%v]", o.Binary.Tag)
+ return "", fmt.Errorf("unsupported binary tag [%v]", o.Binary.GetTag())
default:
- return "", fmt.Errorf("received an unexpected scalar type [%v]", reflect.TypeOf(l.Value))
+ return "", fmt.Errorf("received an unexpected scalar type [%v]", reflect.TypeOf(l.GetValue()))
}
}
func serializeLiteral(ctx context.Context, l *idlCore.Literal) (string, error) {
- switch o := l.Value.(type) {
+ switch o := l.GetValue().(type) {
case *idlCore.Literal_Collection:
- res := make([]string, 0, len(o.Collection.Literals))
- for _, sub := range o.Collection.Literals {
+ res := make([]string, 0, len(o.Collection.GetLiterals()))
+ for _, sub := range o.Collection.GetLiterals() {
s, err := serializeLiteral(ctx, sub)
if err != nil {
return "", err
@@ -237,6 +237,6 @@ func serializeLiteral(ctx context.Context, l *idlCore.Literal) (string, error) {
return serializeLiteralScalar(o.Scalar)
default:
logger.Debugf(ctx, "received unexpected primitive type")
- return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(l.Value))
+ return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(l.GetValue()))
}
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go
index 32d2e0180e..501798c798 100644
--- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go
@@ -213,7 +213,7 @@ func BuildRawContainer(ctx context.Context, tCtx pluginscore.TaskExecutionContex
containerName = rand.String(4)
}
- res, err := ToK8sResourceRequirements(taskContainer.Resources)
+ res, err := ToK8sResourceRequirements(taskContainer.GetResources())
if err != nil {
return nil, err
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go
index eaee5bce6c..427dae9978 100644
--- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go
@@ -162,7 +162,7 @@ func CalculateStorageSize(requirements *v1.ResourceRequirements) *resource.Quant
}
func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c *v1.Container, iFace *core.TypedInterface, pilot *core.DataLoadingConfig) error {
- if pilot == nil || !pilot.Enabled {
+ if pilot == nil || !pilot.GetEnabled() {
return nil
}
logger.Infof(ctx, "Enabling CoPilot on main container [%s]", c.Name)
@@ -175,7 +175,7 @@ func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c
c.SecurityContext.Capabilities.Add = append(c.SecurityContext.Capabilities.Add, pTraceCapability)
if iFace != nil {
- if iFace.Inputs != nil && len(iFace.Inputs.Variables) > 0 {
+ if iFace.GetInputs() != nil && len(iFace.GetInputs().GetVariables()) > 0 {
inPath := cfg.DefaultInputDataPath
if pilot.GetInputPath() != "" {
inPath = pilot.GetInputPath()
@@ -187,7 +187,7 @@ func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c
})
}
- if iFace.Outputs != nil && len(iFace.Outputs.Variables) > 0 {
+ if iFace.GetOutputs() != nil && len(iFace.GetOutputs().GetVariables()) > 0 {
outPath := cfg.DefaultOutputPath
if pilot.GetOutputPath() != "" {
outPath = pilot.GetOutputPath()
@@ -202,16 +202,17 @@ func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c
}
func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilotPod *v1.PodSpec, iFace *core.TypedInterface, taskExecMetadata core2.TaskExecutionMetadata, inputPaths io.InputFilePaths, outputPaths io.OutputFilePaths, pilot *core.DataLoadingConfig) (string, error) {
- if pilot == nil || !pilot.Enabled {
+ if pilot == nil || !pilot.GetEnabled() {
return "", nil
}
- logger.Infof(ctx, "CoPilot Enabled for task [%s]", taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name)
+ //nolint:protogetter
+ logger.Infof(ctx, "CoPilot Enabled for task [%s]", taskExecMetadata.GetTaskExecutionID().GetID().TaskId.GetName())
shareProcessNamespaceEnabled := true
coPilotPod.ShareProcessNamespace = &shareProcessNamespaceEnabled
primaryInitContainerName := ""
if iFace != nil {
- if iFace.Inputs != nil && len(iFace.Inputs.Variables) > 0 {
+ if iFace.GetInputs() != nil && len(iFace.GetInputs().GetVariables()) > 0 {
inPath := cfg.DefaultInputDataPath
if pilot.GetInputPath() != "" {
inPath = pilot.GetInputPath()
@@ -219,18 +220,19 @@ func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilot
// TODO we should calculate input volume size based on the size of the inputs which is known ahead of time. We should store that as part of the metadata
size := CalculateStorageSize(taskExecMetadata.GetOverrides().GetResources())
- logger.Infof(ctx, "Adding Input path [%s] of Size [%d] for Task [%s]", inPath, size, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name)
+ //nolint:protogetter
+ logger.Infof(ctx, "Adding Input path [%s] of Size [%d] for Task [%s]", inPath, size, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.GetName())
inputsVolumeMount := v1.VolumeMount{
Name: cfg.InputVolumeName,
MountPath: inPath,
}
- format := pilot.Format
+ format := pilot.GetFormat()
// Lets add the InputsVolume
coPilotPod.Volumes = append(coPilotPod.Volumes, DataVolume(cfg.InputVolumeName, size))
// Lets add the Inputs init container
- args, err := DownloadCommandArgs(inputPaths.GetInputPath(), outputPaths.GetOutputPrefixPath(), inPath, format, iFace.Inputs)
+ args, err := DownloadCommandArgs(inputPaths.GetInputPath(), outputPaths.GetOutputPrefixPath(), inPath, format, iFace.GetInputs())
if err != nil {
return primaryInitContainerName, err
}
@@ -242,14 +244,15 @@ func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilot
primaryInitContainerName = downloader.Name
}
- if iFace.Outputs != nil && len(iFace.Outputs.Variables) > 0 {
+ if iFace.GetOutputs() != nil && len(iFace.GetOutputs().GetVariables()) > 0 {
outPath := cfg.DefaultOutputPath
if pilot.GetOutputPath() != "" {
outPath = pilot.GetOutputPath()
}
size := CalculateStorageSize(taskExecMetadata.GetOverrides().GetResources())
- logger.Infof(ctx, "Adding Output path [%s] of size [%d] for Task [%s]", size, outPath, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name)
+ //nolint:protogetter
+ logger.Infof(ctx, "Adding Output path [%s] of size [%d] for Task [%s]", size, outPath, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.GetName())
outputsVolumeMount := v1.VolumeMount{
Name: cfg.OutputVolumeName,
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go
index aba18c85ac..182354d07c 100644
--- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go
@@ -132,11 +132,11 @@ func TestDownloadCommandArgs(t *testing.T) {
if assert.NoError(t, err) {
vm := &core.VariableMap{}
assert.NoError(t, proto.Unmarshal(serIFaceBytes, vm))
- assert.Len(t, vm.Variables, 2)
- for k, v := range iFace.Variables {
- v2, ok := vm.Variables[k]
+ assert.Len(t, vm.GetVariables(), 2)
+ for k, v := range iFace.GetVariables() {
+ v2, ok := vm.GetVariables()[k]
assert.True(t, ok)
- assert.Equal(t, v.Type.GetSimple(), v2.Type.GetSimple(), "for %s, types do not match", k)
+ assert.Equal(t, v.GetType().GetSimple(), v2.GetType().GetSimple(), "for %s, types do not match", k)
}
}
}
@@ -167,11 +167,11 @@ func TestSidecarCommandArgs(t *testing.T) {
if assert.NoError(t, err) {
if2 := &core.TypedInterface{}
assert.NoError(t, proto.Unmarshal(serIFaceBytes, if2))
- assert.Len(t, if2.Outputs.Variables, 2)
- for k, v := range iFace.Outputs.Variables {
- v2, ok := if2.Outputs.Variables[k]
+ assert.Len(t, if2.GetOutputs().GetVariables(), 2)
+ for k, v := range iFace.GetOutputs().GetVariables() {
+ v2, ok := if2.GetOutputs().GetVariables()[k]
assert.True(t, ok)
- assert.Equal(t, v.Type.GetSimple(), v2.Type.GetSimple(), "for %s, types do not match", k)
+ assert.Equal(t, v.GetType().GetSimple(), v2.GetType().GetSimple(), "for %s, types do not match", k)
}
}
}
@@ -196,20 +196,20 @@ func assertContainerHasVolumeMounts(t *testing.T, cfg config.FlyteCoPilotConfig,
for _, v := range c.VolumeMounts {
vmap[v.Name] = v
}
- if iFace.Inputs != nil {
+ if iFace.GetInputs() != nil {
path := cfg.DefaultInputDataPath
- if pilot.InputPath != "" {
- path = pilot.InputPath
+ if pilot.GetInputPath() != "" {
+ path = pilot.GetInputPath()
}
v, found := vmap[cfg.InputVolumeName]
assert.Equal(t, path, v.MountPath, "Input Path does not match")
assert.True(t, found, "Input volume mount expected but not found!")
}
- if iFace.Outputs != nil {
+ if iFace.GetOutputs() != nil {
path := cfg.DefaultOutputPath
- if pilot.OutputPath != "" {
- path = pilot.OutputPath
+ if pilot.GetOutputPath() != "" {
+ path = pilot.GetOutputPath()
}
v, found := vmap[cfg.OutputVolumeName]
assert.Equal(t, path, v.MountPath, "Output Path does not match")
@@ -260,10 +260,10 @@ func assertPodHasCoPilot(t *testing.T, cfg config.FlyteCoPilotConfig, pilot *cor
for _, v := range c.VolumeMounts {
vmap[v.Name] = v
}
- if iFace.Inputs != nil {
+ if iFace.GetInputs() != nil {
path := cfg.DefaultInputDataPath
if pilot != nil {
- path = pilot.InputPath
+ path = pilot.GetInputPath()
}
v, found := vmap[cfg.InputVolumeName]
if c.Name == cfg.NamePrefix+flyteInitContainerName {
@@ -274,10 +274,10 @@ func assertPodHasCoPilot(t *testing.T, cfg config.FlyteCoPilotConfig, pilot *cor
}
}
- if iFace.Outputs != nil {
+ if iFace.GetOutputs() != nil {
path := cfg.DefaultOutputPath
if pilot != nil {
- path = pilot.OutputPath
+ path = pilot.GetOutputPath()
}
v, found := vmap[cfg.OutputVolumeName]
if c.Name == cfg.NamePrefix+flyteInitContainerName {
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go
index 22d098785f..9d87a4059e 100644
--- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go
@@ -43,25 +43,26 @@ func GetContextEnvVars(ownerCtx context.Context) []v1.EnvVar {
func GetExecutionEnvVars(id pluginsCore.TaskExecutionID, consoleURL string) []v1.EnvVar {
- if id == nil || id.GetID().NodeExecutionId == nil || id.GetID().NodeExecutionId.ExecutionId == nil {
+ //nolint:protogetter
+ if id == nil || id.GetID().NodeExecutionId == nil || id.GetID().NodeExecutionId.GetExecutionId() == nil {
return []v1.EnvVar{}
}
// Execution level env variables.
- nodeExecutionID := id.GetID().NodeExecutionId.ExecutionId
- attemptNumber := strconv.Itoa(int(id.GetID().RetryAttempt))
+ nodeExecutionID := id.GetID().NodeExecutionId.GetExecutionId() //nolint:protogetter
+ attemptNumber := strconv.Itoa(int(id.GetID().RetryAttempt)) //nolint:protogetter
envVars := []v1.EnvVar{
{
Name: "FLYTE_INTERNAL_EXECUTION_ID",
- Value: nodeExecutionID.Name,
+ Value: nodeExecutionID.GetName(),
},
{
Name: "FLYTE_INTERNAL_EXECUTION_PROJECT",
- Value: nodeExecutionID.Project,
+ Value: nodeExecutionID.GetProject(),
},
{
Name: "FLYTE_INTERNAL_EXECUTION_DOMAIN",
- Value: nodeExecutionID.Domain,
+ Value: nodeExecutionID.GetDomain(),
},
{
Name: "FLYTE_ATTEMPT_NUMBER",
@@ -82,48 +83,48 @@ func GetExecutionEnvVars(id pluginsCore.TaskExecutionID, consoleURL string) []v1
consoleURL = strings.TrimRight(consoleURL, "/")
envVars = append(envVars, v1.EnvVar{
Name: flyteExecutionURL,
- Value: fmt.Sprintf("%s/projects/%s/domains/%s/executions/%s/nodeId/%s/nodes", consoleURL, nodeExecutionID.Project, nodeExecutionID.Domain, nodeExecutionID.Name, id.GetUniqueNodeID()),
+ Value: fmt.Sprintf("%s/projects/%s/domains/%s/executions/%s/nodeId/%s/nodes", consoleURL, nodeExecutionID.GetProject(), nodeExecutionID.GetDomain(), nodeExecutionID.GetName(), id.GetUniqueNodeID()),
})
}
// Task definition Level env variables.
- if id.GetID().TaskId != nil {
- taskID := id.GetID().TaskId
+ if id.GetID().TaskId != nil { //nolint:protogetter
+ taskID := id.GetID().TaskId //nolint:protogetter
envVars = append(envVars,
v1.EnvVar{
Name: "FLYTE_INTERNAL_TASK_PROJECT",
- Value: taskID.Project,
+ Value: taskID.GetProject(),
},
v1.EnvVar{
Name: "FLYTE_INTERNAL_TASK_DOMAIN",
- Value: taskID.Domain,
+ Value: taskID.GetDomain(),
},
v1.EnvVar{
Name: "FLYTE_INTERNAL_TASK_NAME",
- Value: taskID.Name,
+ Value: taskID.GetName(),
},
v1.EnvVar{
Name: "FLYTE_INTERNAL_TASK_VERSION",
- Value: taskID.Version,
+ Value: taskID.GetVersion(),
},
// Historic Task Definition Level env variables.
// Remove these once SDK is migrated to use the new ones.
v1.EnvVar{
Name: "FLYTE_INTERNAL_PROJECT",
- Value: taskID.Project,
+ Value: taskID.GetProject(),
},
v1.EnvVar{
Name: "FLYTE_INTERNAL_DOMAIN",
- Value: taskID.Domain,
+ Value: taskID.GetDomain(),
},
v1.EnvVar{
Name: "FLYTE_INTERNAL_NAME",
- Value: taskID.Name,
+ Value: taskID.GetName(),
},
v1.EnvVar{
Name: "FLYTE_INTERNAL_VERSION",
- Value: taskID.Version,
+ Value: taskID.GetVersion(),
})
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go
index 229f963968..53acac5512 100644
--- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go
@@ -287,15 +287,15 @@ func BuildRawPod(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v
}
case *core.TaskTemplate_K8SPod:
// handles pod tasks that marshal the pod spec to the k8s_pod task target.
- if target.K8SPod.PodSpec == nil {
+ if target.K8SPod.GetPodSpec() == nil {
return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
"Pod tasks with task type version > 1 should specify their target as a K8sPod with a defined pod spec")
}
- err := utils.UnmarshalStructToObj(target.K8SPod.PodSpec, &podSpec)
+ err := utils.UnmarshalStructToObj(target.K8SPod.GetPodSpec(), &podSpec)
if err != nil {
return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
- "Unable to unmarshal task k8s pod [%v], Err: [%v]", target.K8SPod.PodSpec, err.Error())
+ "Unable to unmarshal task k8s pod [%v], Err: [%v]", target.K8SPod.GetPodSpec(), err.Error())
}
// get primary container name
@@ -306,9 +306,9 @@ func BuildRawPod(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v
}
// update annotations and labels
- if taskTemplate.GetK8SPod().Metadata != nil {
- mergeMapInto(target.K8SPod.Metadata.Annotations, objectMeta.Annotations)
- mergeMapInto(target.K8SPod.Metadata.Labels, objectMeta.Labels)
+ if taskTemplate.GetK8SPod().GetMetadata() != nil {
+ mergeMapInto(target.K8SPod.GetMetadata().GetAnnotations(), objectMeta.Annotations)
+ mergeMapInto(target.K8SPod.GetMetadata().GetLabels(), objectMeta.Labels)
}
default:
return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
@@ -393,7 +393,7 @@ func ApplyFlytePodConfiguration(ctx context.Context, tCtx pluginsCore.TaskExecut
if dataLoadingConfig != nil {
if err := AddCoPilotToContainer(ctx, config.GetK8sPluginConfig().CoPilot,
- primaryContainer, taskTemplate.Interface, dataLoadingConfig); err != nil {
+ primaryContainer, taskTemplate.GetInterface(), dataLoadingConfig); err != nil {
return nil, nil, err
}
@@ -483,11 +483,11 @@ func getBasePodTemplate(ctx context.Context, tCtx pluginsCore.TaskExecutionConte
}
var podTemplate *v1.PodTemplate
- if taskTemplate.Metadata != nil && len(taskTemplate.Metadata.PodTemplateName) > 0 {
+ if taskTemplate.GetMetadata() != nil && len(taskTemplate.GetMetadata().GetPodTemplateName()) > 0 {
// retrieve PodTemplate by name from PodTemplateStore
- podTemplate = podTemplateStore.LoadOrDefault(tCtx.TaskExecutionMetadata().GetNamespace(), taskTemplate.Metadata.PodTemplateName)
+ podTemplate = podTemplateStore.LoadOrDefault(tCtx.TaskExecutionMetadata().GetNamespace(), taskTemplate.GetMetadata().GetPodTemplateName())
if podTemplate == nil {
- return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "PodTemplate '%s' does not exist", taskTemplate.Metadata.PodTemplateName)
+ return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "PodTemplate '%s' does not exist", taskTemplate.GetMetadata().GetPodTemplateName())
}
} else {
// check for default PodTemplate
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go
index 9797b5e05b..65194d01be 100644
--- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go
@@ -1529,7 +1529,7 @@ func TestDemystifyPendingTimeout(t *testing.T) {
taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{})
assert.NoError(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskStatus.Phase())
- assert.Equal(t, "PodPendingTimeout", taskStatus.Err().Code)
+ assert.Equal(t, "PodPendingTimeout", taskStatus.Err().GetCode())
assert.True(t, taskStatus.CleanupOnFailure())
})
}
@@ -1549,7 +1549,7 @@ func TestDemystifySuccess(t *testing.T) {
}, pluginsCore.TaskInfo{})
assert.Nil(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "OOMKilled", phaseInfo.Err().Code)
+ assert.Equal(t, "OOMKilled", phaseInfo.Err().GetCode())
})
t.Run("InitContainer OOMKilled", func(t *testing.T) {
@@ -1566,7 +1566,7 @@ func TestDemystifySuccess(t *testing.T) {
}, pluginsCore.TaskInfo{})
assert.Nil(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "OOMKilled", phaseInfo.Err().Code)
+ assert.Equal(t, "OOMKilled", phaseInfo.Err().GetCode())
})
t.Run("success", func(t *testing.T) {
@@ -1581,16 +1581,16 @@ func TestDemystifyFailure(t *testing.T) {
phaseInfo, err := DemystifyFailure(v1.PodStatus{}, pluginsCore.TaskInfo{})
assert.Nil(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "UnknownError", phaseInfo.Err().Code)
- assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind)
+ assert.Equal(t, "UnknownError", phaseInfo.Err().GetCode())
+ assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().GetKind())
})
t.Run("known-error", func(t *testing.T) {
phaseInfo, err := DemystifyFailure(v1.PodStatus{Reason: "hello"}, pluginsCore.TaskInfo{})
assert.Nil(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "hello", phaseInfo.Err().Code)
- assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind)
+ assert.Equal(t, "hello", phaseInfo.Err().GetCode())
+ assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().GetKind())
})
t.Run("OOMKilled", func(t *testing.T) {
@@ -1608,8 +1608,8 @@ func TestDemystifyFailure(t *testing.T) {
}, pluginsCore.TaskInfo{})
assert.Nil(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "OOMKilled", phaseInfo.Err().Code)
- assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind)
+ assert.Equal(t, "OOMKilled", phaseInfo.Err().GetCode())
+ assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().GetKind())
})
t.Run("SIGKILL", func(t *testing.T) {
@@ -1627,8 +1627,8 @@ func TestDemystifyFailure(t *testing.T) {
}, pluginsCore.TaskInfo{})
assert.Nil(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "Interrupted", phaseInfo.Err().Code)
- assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind)
+ assert.Equal(t, "Interrupted", phaseInfo.Err().GetCode())
+ assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().GetKind())
})
t.Run("GKE kubelet graceful node shutdown", func(t *testing.T) {
@@ -1649,9 +1649,9 @@ func TestDemystifyFailure(t *testing.T) {
}, pluginsCore.TaskInfo{})
assert.Nil(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "Interrupted", phaseInfo.Err().Code)
- assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().Kind)
- assert.Contains(t, phaseInfo.Err().Message, containerReason)
+ assert.Equal(t, "Interrupted", phaseInfo.Err().GetCode())
+ assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().GetKind())
+ assert.Contains(t, phaseInfo.Err().GetMessage(), containerReason)
})
t.Run("GKE kubelet graceful node shutdown", func(t *testing.T) {
@@ -1672,9 +1672,9 @@ func TestDemystifyFailure(t *testing.T) {
}, pluginsCore.TaskInfo{})
assert.Nil(t, err)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "Interrupted", phaseInfo.Err().Code)
- assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().Kind)
- assert.Contains(t, phaseInfo.Err().Message, containerReason)
+ assert.Equal(t, "Interrupted", phaseInfo.Err().GetCode())
+ assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().GetKind())
+ assert.Contains(t, phaseInfo.Err().GetMessage(), containerReason)
})
}
@@ -1705,8 +1705,8 @@ func TestDemystifyPending_testcases(t *testing.T) {
assert.NotNil(t, p)
assert.Equal(t, p.Phase(), pluginsCore.PhaseRetryableFailure)
if assert.NotNil(t, p.Err()) {
- assert.Equal(t, p.Err().Code, tt.errCode)
- assert.Equal(t, p.Err().Message, tt.message)
+ assert.Equal(t, p.Err().GetCode(), tt.errCode)
+ assert.Equal(t, p.Err().GetMessage(), tt.message)
}
}
}
@@ -1765,8 +1765,8 @@ func TestDeterminePrimaryContainerPhase(t *testing.T) {
},
}, info)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, "foo", phaseInfo.Err().Code)
- assert.Equal(t, "\r\n[primary] terminated with exit code (1). Reason [foo]. Message: \nfoo failed.", phaseInfo.Err().Message)
+ assert.Equal(t, "foo", phaseInfo.Err().GetCode())
+ assert.Equal(t, "\r\n[primary] terminated with exit code (1). Reason [foo]. Message: \nfoo failed.", phaseInfo.Err().GetMessage())
})
t.Run("primary container succeeded", func(t *testing.T) {
phaseInfo := DeterminePrimaryContainerPhase(primaryContainerName, []v1.ContainerStatus{
@@ -1786,8 +1786,8 @@ func TestDeterminePrimaryContainerPhase(t *testing.T) {
secondaryContainer,
}, info)
assert.Equal(t, pluginsCore.PhasePermanentFailure, phaseInfo.Phase())
- assert.Equal(t, PrimaryContainerNotFound, phaseInfo.Err().Code)
- assert.Equal(t, "Primary container [primary] not found in pod's container statuses", phaseInfo.Err().Message)
+ assert.Equal(t, PrimaryContainerNotFound, phaseInfo.Err().GetCode())
+ assert.Equal(t, "Primary container [primary] not found in pod's container statuses", phaseInfo.Err().GetMessage())
})
t.Run("primary container failed with OOMKilled", func(t *testing.T) {
phaseInfo := DeterminePrimaryContainerPhase(primaryContainerName, []v1.ContainerStatus{
@@ -1803,8 +1803,8 @@ func TestDeterminePrimaryContainerPhase(t *testing.T) {
},
}, info)
assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
- assert.Equal(t, OOMKilled, phaseInfo.Err().Code)
- assert.Equal(t, "\r\n[primary] terminated with exit code (0). Reason [OOMKilled]. Message: \nfoo failed.", phaseInfo.Err().Message)
+ assert.Equal(t, OOMKilled, phaseInfo.Err().GetCode())
+ assert.Equal(t, "\r\n[primary] terminated with exit code (0). Reason [OOMKilled]. Message: \nfoo failed.", phaseInfo.Err().GetMessage())
})
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go
index ef7807aadd..fab4f84997 100644
--- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go
@@ -12,7 +12,7 @@ import (
func ToK8sEnvVar(env []*core.KeyValuePair) []v1.EnvVar {
envVars := make([]v1.EnvVar, 0, len(env))
for _, kv := range env {
- envVars = append(envVars, v1.EnvVar{Name: kv.Key, Value: kv.Value})
+ envVars = append(envVars, v1.EnvVar{Name: kv.GetKey(), Value: kv.GetValue()})
}
return envVars
}
@@ -22,12 +22,12 @@ func ToK8sEnvVar(env []*core.KeyValuePair) []v1.EnvVar {
func ToK8sResourceList(resources []*core.Resources_ResourceEntry) (v1.ResourceList, error) {
k8sResources := make(v1.ResourceList, len(resources))
for _, r := range resources {
- rVal := r.Value
+ rVal := r.GetValue()
v, err := resource.ParseQuantity(rVal)
if err != nil {
return nil, errors.Wrap(err, "Failed to parse resource as a valid quantity.")
}
- switch r.Name {
+ switch r.GetName() {
case core.Resources_CPU:
if !v.IsZero() {
k8sResources[v1.ResourceCPU] = v
@@ -54,11 +54,11 @@ func ToK8sResourceRequirements(resources *core.Resources) (*v1.ResourceRequireme
if resources == nil {
return res, nil
}
- req, err := ToK8sResourceList(resources.Requests)
+ req, err := ToK8sResourceList(resources.GetRequests())
if err != nil {
return res, err
}
- lim, err := ToK8sResourceList(resources.Limits)
+ lim, err := ToK8sResourceList(resources.GetLimits())
if err != nil {
return res, err
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go
index 7569abd90e..b9efcd7372 100644
--- a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go
+++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go
@@ -179,7 +179,7 @@ func NewResourceCache(ctx context.Context, name string, client Client, cfg webap
workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(rateCfg.QPS), rateCfg.Burst)},
- ), cfg.ResyncInterval.Duration, cfg.Workers, cfg.Size,
+ ), cfg.ResyncInterval.Duration, uint(cfg.Workers), uint(cfg.Size), // #nosec G115
scope.NewSubScope("cache"))
if err != nil {
diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/error_reader.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/error_reader.go
new file mode 100644
index 0000000000..161e863f49
--- /dev/null
+++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/error_reader.go
@@ -0,0 +1,93 @@
+// Code generated by mockery v1.0.1. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ io "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// ErrorReader is an autogenerated mock type for the ErrorReader type
+type ErrorReader struct {
+ mock.Mock
+}
+
+type ErrorReader_IsError struct {
+ *mock.Call
+}
+
+func (_m ErrorReader_IsError) Return(_a0 bool, _a1 error) *ErrorReader_IsError {
+ return &ErrorReader_IsError{Call: _m.Call.Return(_a0, _a1)}
+}
+
+func (_m *ErrorReader) OnIsError(ctx context.Context) *ErrorReader_IsError {
+ c_call := _m.On("IsError", ctx)
+ return &ErrorReader_IsError{Call: c_call}
+}
+
+func (_m *ErrorReader) OnIsErrorMatch(matchers ...interface{}) *ErrorReader_IsError {
+ c_call := _m.On("IsError", matchers...)
+ return &ErrorReader_IsError{Call: c_call}
+}
+
+// IsError provides a mock function with given fields: ctx
+func (_m *ErrorReader) IsError(ctx context.Context) (bool, error) {
+ ret := _m.Called(ctx)
+
+ var r0 bool
+ if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+type ErrorReader_ReadError struct {
+ *mock.Call
+}
+
+func (_m ErrorReader_ReadError) Return(_a0 io.ExecutionError, _a1 error) *ErrorReader_ReadError {
+ return &ErrorReader_ReadError{Call: _m.Call.Return(_a0, _a1)}
+}
+
+func (_m *ErrorReader) OnReadError(ctx context.Context) *ErrorReader_ReadError {
+ c_call := _m.On("ReadError", ctx)
+ return &ErrorReader_ReadError{Call: c_call}
+}
+
+func (_m *ErrorReader) OnReadErrorMatch(matchers ...interface{}) *ErrorReader_ReadError {
+ c_call := _m.On("ReadError", matchers...)
+ return &ErrorReader_ReadError{Call: c_call}
+}
+
+// ReadError provides a mock function with given fields: ctx
+func (_m *ErrorReader) ReadError(ctx context.Context) (io.ExecutionError, error) {
+ ret := _m.Called(ctx)
+
+ var r0 io.ExecutionError
+ if rf, ok := ret.Get(0).(func(context.Context) io.ExecutionError); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(io.ExecutionError)
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go
index ad82fca8a3..52b58b732d 100644
--- a/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go
+++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go
@@ -39,7 +39,7 @@ func TestInMemoryOutputReader(t *testing.T) {
assert.NoError(t, err)
literalMap, executionErr, err := or.Read(ctx)
- assert.Equal(t, lt, literalMap.Literals)
+ assert.Equal(t, lt, literalMap.GetLiterals())
assert.Nil(t, executionErr)
assert.NoError(t, err)
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go
index 2a20272f6e..909d1fedfa 100644
--- a/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go
+++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go
@@ -68,6 +68,6 @@ func NewBase36PrefixShardSelector(ctx context.Context) (ShardSelector, error) {
func NewConstantShardSelector(shards []string) ShardSelector {
return &PrecomputedShardSelector{
precomputedPrefixes: shards,
- buckets: uint32(len(shards)),
+ buckets: uint32(len(shards)), // #nosec G115
}
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go
index ae880f3640..c28f5ac94d 100644
--- a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go
+++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go
@@ -105,7 +105,7 @@ func (s *singleFileErrorReader) IsError(ctx context.Context) (bool, error) {
}
func errorDoc2ExecutionError(errorDoc *core.ErrorDocument, errorFilePath storage.DataReference) io.ExecutionError {
- if errorDoc.Error == nil {
+ if errorDoc.GetError() == nil {
return io.ExecutionError{
IsRecoverable: true,
ExecutionError: &core.ExecutionError{
@@ -117,15 +117,15 @@ func errorDoc2ExecutionError(errorDoc *core.ErrorDocument, errorFilePath storage
}
executionError := io.ExecutionError{
ExecutionError: &core.ExecutionError{
- Code: errorDoc.Error.Code,
- Message: errorDoc.Error.Message,
- Kind: errorDoc.Error.Origin,
- Timestamp: errorDoc.Error.Timestamp,
- Worker: errorDoc.Error.Worker,
+ Code: errorDoc.GetError().GetCode(),
+ Message: errorDoc.GetError().GetMessage(),
+ Kind: errorDoc.GetError().GetOrigin(),
+ Timestamp: errorDoc.GetError().GetTimestamp(),
+ Worker: errorDoc.GetError().GetWorker(),
},
}
- if errorDoc.Error.Kind == core.ContainerError_RECOVERABLE {
+ if errorDoc.GetError().GetKind() == core.ContainerError_RECOVERABLE {
executionError.IsRecoverable = true
}
@@ -201,7 +201,7 @@ func (e *earliestFileErrorReader) ReadError(ctx context.Context) (io.ExecutionEr
if err != nil {
return io.ExecutionError{}, errors.Wrapf(err, "failed to read error file @[%s]", errorFilePath.String())
}
- timestamp := errorDoc.Error.GetTimestamp().AsTime()
+ timestamp := errorDoc.GetError().GetTimestamp().AsTime()
if earliestTimestamp == nil || earliestTimestamp.After(timestamp) {
earliestExecutionError = errorDoc2ExecutionError(errorDoc, errorFilePath)
earliestTimestamp = &timestamp
diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go
index 59c7f48307..b2dbb0ba55 100644
--- a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go
+++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go
@@ -91,7 +91,7 @@ func TestReadOrigin(t *testing.T) {
incomingErrorDoc := args.Get(2)
assert.NotNil(t, incomingErrorDoc)
casted := incomingErrorDoc.(*core.ErrorDocument)
- casted.Error = errorDoc.Error
+ casted.Error = errorDoc.GetError()
}).Return(nil)
store.OnHead(ctx, storage.DataReference("deck.html")).Return(MemoryMetadata{
@@ -129,7 +129,7 @@ func TestReadOrigin(t *testing.T) {
incomingErrorDoc := args.Get(2)
assert.NotNil(t, incomingErrorDoc)
casted := incomingErrorDoc.(*core.ErrorDocument)
- casted.Error = errorDoc.Error
+ casted.Error = errorDoc.GetError()
}).Return(nil)
maxPayloadSize := int64(0)
@@ -168,7 +168,7 @@ func TestReadOrigin(t *testing.T) {
incomingErrorDoc := args.Get(2)
assert.NotNil(t, incomingErrorDoc)
casted := incomingErrorDoc.(*core.ErrorDocument)
- casted.Error = errorDoc.Error
+ casted.Error = errorDoc.GetError()
}).Return(nil)
store.OnList(ctx, storage.DataReference("s3://errors/error"), 1000, storage.NewCursorAtStart()).Return(
@@ -227,7 +227,7 @@ func TestReadOrigin(t *testing.T) {
incomingErrorDoc := args.Get(2)
assert.NotNil(t, incomingErrorDoc)
casted := incomingErrorDoc.(*core.ErrorDocument)
- casted.Error = errorDoc.Error
+ casted.Error = errorDoc.GetError()
}).Return(nil)
store.OnList(ctx, storage.DataReference("s3://errors/error"), 1000, storage.NewCursorAtStart()).Return(
diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go
index 19aae6ba7c..6080754036 100644
--- a/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go
+++ b/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go
@@ -120,44 +120,44 @@ func (input Input) templateVars() []TemplateVar {
},
TemplateVar{
defaultRegexes.TaskRetryAttempt,
- strconv.FormatUint(uint64(taskExecutionIdentifier.RetryAttempt), 10),
+ strconv.FormatUint(uint64(taskExecutionIdentifier.GetRetryAttempt()), 10),
},
)
- if taskExecutionIdentifier.TaskId != nil {
+ if taskExecutionIdentifier.GetTaskId() != nil {
vars = append(
vars,
TemplateVar{
defaultRegexes.TaskID,
- taskExecutionIdentifier.TaskId.Name,
+ taskExecutionIdentifier.GetTaskId().GetName(),
},
TemplateVar{
defaultRegexes.TaskVersion,
- taskExecutionIdentifier.TaskId.Version,
+ taskExecutionIdentifier.GetTaskId().GetVersion(),
},
TemplateVar{
defaultRegexes.TaskProject,
- taskExecutionIdentifier.TaskId.Project,
+ taskExecutionIdentifier.GetTaskId().GetProject(),
},
TemplateVar{
defaultRegexes.TaskDomain,
- taskExecutionIdentifier.TaskId.Domain,
+ taskExecutionIdentifier.GetTaskId().GetDomain(),
},
)
}
- if taskExecutionIdentifier.NodeExecutionId != nil && taskExecutionIdentifier.NodeExecutionId.ExecutionId != nil {
+ if taskExecutionIdentifier.GetNodeExecutionId() != nil && taskExecutionIdentifier.GetNodeExecutionId().GetExecutionId() != nil {
vars = append(
vars,
TemplateVar{
defaultRegexes.ExecutionName,
- taskExecutionIdentifier.NodeExecutionId.ExecutionId.Name,
+ taskExecutionIdentifier.GetNodeExecutionId().GetExecutionId().GetName(),
},
TemplateVar{
defaultRegexes.ExecutionProject,
- taskExecutionIdentifier.NodeExecutionId.ExecutionId.Project,
+ taskExecutionIdentifier.GetNodeExecutionId().GetExecutionId().GetProject(),
},
TemplateVar{
defaultRegexes.ExecutionDomain,
- taskExecutionIdentifier.NodeExecutionId.ExecutionId.Domain,
+ taskExecutionIdentifier.GetNodeExecutionId().GetExecutionId().GetDomain(),
},
)
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go b/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go
index b6ea59020b..91423a9929 100644
--- a/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go
+++ b/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go
@@ -53,8 +53,8 @@ func unmarshalSecret(encoded string) (*core.Secret, error) {
func MarshalSecretsToMapStrings(secrets []*core.Secret) (map[string]string, error) {
res := make(map[string]string, len(secrets))
for index, s := range secrets {
- if _, found := core.Secret_MountType_name[int32(s.MountRequirement)]; !found {
- return nil, fmt.Errorf("invalid mount requirement [%v]", s.MountRequirement)
+ if _, found := core.Secret_MountType_name[int32(s.GetMountRequirement())]; !found {
+ return nil, fmt.Errorf("invalid mount requirement [%v]", s.GetMountRequirement())
}
encodedSecret := marshalSecret(s)
diff --git a/flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go b/flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go
index fce1acde89..cf5fc8e451 100644
--- a/flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go
+++ b/flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go
@@ -227,6 +227,7 @@ func (q *queue) Start(ctx context.Context) error {
wrapper.retryCount++
wrapper.err = err
+ // #nosec G115
if wrapper.retryCount >= uint(q.maxRetries) {
logger.Debugf(ctx, "WorkItem [%v] exhausted all retries. Last Error: %v.",
wrapper.ID(), err)
diff --git a/flyteplugins/go/tasks/plugins/array/arraystatus/status_test.go b/flyteplugins/go/tasks/plugins/array/arraystatus/status_test.go
index 96aabcfcc6..a9ebea2825 100644
--- a/flyteplugins/go/tasks/plugins/array/arraystatus/status_test.go
+++ b/flyteplugins/go/tasks/plugins/array/arraystatus/status_test.go
@@ -29,7 +29,7 @@ func TestArrayStatus_HashCode(t *testing.T) {
})
t.Run("Populated Equal", func(t *testing.T) {
- expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1))
+ expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115
assert.Nil(t, err)
expected := ArrayStatus{
Detailed: expectedDetailed,
@@ -37,7 +37,7 @@ func TestArrayStatus_HashCode(t *testing.T) {
expectedHashCode, err := expected.HashCode()
assert.Nil(t, err)
- actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1))
+ actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115
assert.Nil(t, err)
actual := ArrayStatus{
Detailed: actualDetailed,
@@ -49,7 +49,7 @@ func TestArrayStatus_HashCode(t *testing.T) {
})
t.Run("Updated Not Equal", func(t *testing.T) {
- expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1))
+ expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115
assert.Nil(t, err)
expectedDetailed.SetItem(0, uint64(1))
expected := ArrayStatus{
@@ -58,7 +58,7 @@ func TestArrayStatus_HashCode(t *testing.T) {
expectedHashCode, err := expected.HashCode()
assert.Nil(t, err)
- actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1))
+ actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115
assert.Nil(t, err)
actual := ArrayStatus{
Detailed: actualDetailed,
@@ -70,7 +70,7 @@ func TestArrayStatus_HashCode(t *testing.T) {
})
t.Run("Updated Equal", func(t *testing.T) {
- expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1))
+ expectedDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115
assert.Nil(t, err)
expectedDetailed.SetItem(0, uint64(1))
expected := ArrayStatus{
@@ -79,7 +79,7 @@ func TestArrayStatus_HashCode(t *testing.T) {
expectedHashCode, err := expected.HashCode()
assert.Nil(t, err)
- actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1))
+ actualDetailed, err := bitarray.NewCompactArray(size, bitarray.Item(len(types.Phases)-1)) // #nosec G115
actualDetailed.SetItem(0, uint64(1))
assert.Nil(t, err)
actual := ArrayStatus{
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/client_test.go b/flyteplugins/go/tasks/plugins/array/awsbatch/client_test.go
index fe35f74e2a..e135aee020 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/client_test.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/client_test.go
@@ -29,7 +29,7 @@ func TestClient_SubmitJob(t *testing.T) {
c := NewCustomBatchClient(mocks.NewMockAwsBatchClient(), "account-id", "test-region", rateLimiter, rateLimiter).(*client)
store, err := NewJobStore(ctx, c, config.JobStoreConfig{
CacheSize: 1,
- Parallelizm: 1,
+ Parallelism: 1,
BatchChunkSize: 1,
ResyncPeriod: stdConfig.Duration{Duration: 1000},
}, EventHandler{}, promutils.NewTestScope())
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config.go b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config.go
index 7b8a484140..7815c23ff5 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config.go
@@ -30,7 +30,7 @@ type Config struct {
type JobStoreConfig struct {
CacheSize int `json:"jacheSize" pflag:",Maximum informer cache size as number of items. Caches are used as an optimization to lessen the load on AWS Services."`
- Parallelizm int `json:"parallelizm"`
+ Parallelism int `json:"parallelism"`
BatchChunkSize int `json:"batchChunkSize" pflag:",Determines the size of each batch sent to GetJobDetails api."`
ResyncPeriod config.Duration `json:"resyncPeriod" pflag:",Defines the duration for syncing job details from AWS Batch."`
}
@@ -39,7 +39,7 @@ var (
defaultConfig = &Config{
JobStoreConfig: JobStoreConfig{
CacheSize: 10000,
- Parallelizm: 20,
+ Parallelism: 20,
BatchChunkSize: 100,
ResyncPeriod: config.Duration{Duration: 30 * time.Second},
},
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags.go b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags.go
index a4cb6b3b0c..5b482fb99d 100755
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags.go
@@ -51,7 +51,7 @@ func (Config) mustMarshalJSON(v json.Marshaler) string {
func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet {
cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError)
cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.jacheSize"), defaultConfig.JobStoreConfig.CacheSize, "Maximum informer cache size as number of items. Caches are used as an optimization to lessen the load on AWS Services.")
- cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.parallelizm"), defaultConfig.JobStoreConfig.Parallelizm, "")
+ cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.parallelism"), defaultConfig.JobStoreConfig.Parallelism, "")
cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.batchChunkSize"), defaultConfig.JobStoreConfig.BatchChunkSize, "Determines the size of each batch sent to GetJobDetails api.")
cmdFlags.String(fmt.Sprintf("%v%v", prefix, "jobStoreConfig.resyncPeriod"), defaultConfig.JobStoreConfig.ResyncPeriod.String(), "Defines the duration for syncing job details from AWS Batch.")
cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "defCacheSize"), defaultConfig.JobDefCacheSize, "Maximum job definition cache size as number of items. Caches are used as an optimization to lessen the load on AWS Services.")
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags_test.go b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags_test.go
index 62d8dc5ac2..9d06838911 100755
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags_test.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/config/config_flags_test.go
@@ -113,14 +113,14 @@ func TestConfig_SetFlags(t *testing.T) {
}
})
})
- t.Run("Test_jobStoreConfig.parallelizm", func(t *testing.T) {
+ t.Run("Test_jobStoreConfig.parallelism", func(t *testing.T) {
t.Run("Override", func(t *testing.T) {
testValue := "1"
- cmdFlags.Set("jobStoreConfig.parallelizm", testValue)
- if vInt, err := cmdFlags.GetInt("jobStoreConfig.parallelizm"); err == nil {
- testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.JobStoreConfig.Parallelizm)
+ cmdFlags.Set("jobStoreConfig.parallelism", testValue)
+ if vInt, err := cmdFlags.GetInt("jobStoreConfig.parallelism"); err == nil {
+ testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.JobStoreConfig.Parallelism)
} else {
assert.FailNow(t, err.Error())
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/job_config.go b/flyteplugins/go/tasks/plugins/array/awsbatch/job_config.go
index 1ca10cb39d..2831dd28ae 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/job_config.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/job_config.go
@@ -41,7 +41,7 @@ func (j *JobConfig) setKeyIfKnown(key, value string) bool {
func (j *JobConfig) MergeFromKeyValuePairs(pairs []*core.KeyValuePair) *JobConfig {
for _, entry := range pairs {
- j.setKeyIfKnown(entry.Key, entry.Value)
+ j.setKeyIfKnown(entry.GetKey(), entry.GetValue())
}
return j
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/job_definition.go b/flyteplugins/go/tasks/plugins/array/awsbatch/job_definition.go
index 1ef9e4ec5b..acd5f124dd 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/job_definition.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/job_definition.go
@@ -19,8 +19,8 @@ const defaultComputeEngine = "EC2"
const platformCapabilitiesConfigKey = "platformCapabilities"
func getContainerImage(_ context.Context, task *core.TaskTemplate) string {
- if task.GetContainer() != nil && len(task.GetContainer().Image) > 0 {
- return task.GetContainer().Image
+ if task.GetContainer() != nil && len(task.GetContainer().GetImage()) > 0 {
+ return task.GetContainer().GetImage()
}
return ""
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store.go b/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store.go
index 16d44b490e..06a1d7d155 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store.go
@@ -372,7 +372,7 @@ func NewJobStore(ctx context.Context, batchClient Client, cfg config.JobStoreCon
autoCache, err := cache.NewAutoRefreshBatchedCache("aws-batch-jobs", batchJobsForSync(ctx, cfg.BatchChunkSize),
syncBatches(ctx, store, handler, cfg.BatchChunkSize), workqueue.DefaultControllerRateLimiter(), cfg.ResyncPeriod.Duration,
- cfg.Parallelizm, cfg.CacheSize, scope)
+ uint(cfg.Parallelism), uint(cfg.CacheSize), scope) // #nosec G115
store.AutoRefresh = autoCache
return store, err
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store_test.go b/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store_test.go
index 8196925e15..122d03c71a 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store_test.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/jobs_store_test.go
@@ -35,7 +35,7 @@ func newJobsStore(t testing.TB, batchClient Client) *JobStore {
func newJobsStoreWithSize(t testing.TB, batchClient Client, size int) *JobStore {
store, err := NewJobStore(context.TODO(), batchClient, config.JobStoreConfig{
CacheSize: size,
- Parallelizm: 1,
+ Parallelism: 1,
BatchChunkSize: 2,
ResyncPeriod: config2.Duration{Duration: 1000},
}, EventHandler{}, promutils.NewTestScope())
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/launcher.go b/flyteplugins/go/tasks/plugins/array/awsbatch/launcher.go
index 609bab6cf7..d42c5ea0fe 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/launcher.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/launcher.go
@@ -33,8 +33,8 @@ func LaunchSubTasks(ctx context.Context, tCtx core.TaskExecutionContext, batchCl
}
// If the original job was marked as an array (not a single job), then make sure to set it up correctly.
- if t.Type == arrayTaskType {
- logger.Debugf(ctx, "Task is of type [%v]. Will setup task index env vars.", t.Type)
+ if t.GetType() == arrayTaskType {
+ logger.Debugf(ctx, "Task is of type [%v]. Will setup task index env vars.", t.GetType())
batchInput = UpdateBatchInputForArray(ctx, batchInput, int64(size))
}
@@ -46,7 +46,7 @@ func LaunchSubTasks(ctx context.Context, tCtx core.TaskExecutionContext, batchCl
metrics.SubTasksSubmitted.Add(ctx, float64(size))
- retryAttemptsArray, err := bitarray.NewCompactArray(uint(size), bitarray.Item(pluginConfig.MaxRetries))
+ retryAttemptsArray, err := bitarray.NewCompactArray(uint(size), bitarray.Item(pluginConfig.MaxRetries)) // #nosec G115
if err != nil {
logger.Errorf(context.Background(), "Failed to create attempts compact array with [count: %v, maxValue: %v]", size, pluginConfig.MaxRetries)
return nil, err
@@ -58,7 +58,7 @@ func LaunchSubTasks(ctx context.Context, tCtx core.TaskExecutionContext, batchCl
Summary: arraystatus.ArraySummary{
core.PhaseQueued: int64(size),
},
- Detailed: arrayCore.NewPhasesCompactArray(uint(size)),
+ Detailed: arrayCore.NewPhasesCompactArray(uint(size)), // #nosec G115
}).
SetReason("Successfully launched subtasks.").
SetRetryAttempts(retryAttemptsArray)
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/monitor.go b/flyteplugins/go/tasks/plugins/array/awsbatch/monitor.go
index 62bc5103dc..d5c05f6cd0 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/monitor.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/monitor.go
@@ -45,7 +45,7 @@ func CheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionContext, job
} else if taskTemplate == nil {
return nil, errors.Errorf(errors.BadTaskSpecification, "Required value not set, taskTemplate is nil")
}
- retry := toRetryStrategy(ctx, toBackoffLimit(taskTemplate.Metadata), cfg.MinRetries, cfg.MaxRetries)
+ retry := toRetryStrategy(ctx, toBackoffLimit(taskTemplate.GetMetadata()), cfg.MinRetries, cfg.MaxRetries)
// If job isn't currently being monitored (recovering from a restart?), add it to the sync-cache and return
if job == nil {
@@ -67,7 +67,7 @@ func CheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionContext, job
msg := errorcollector.NewErrorMessageCollector()
newArrayStatus := arraystatus.ArrayStatus{
Summary: arraystatus.ArraySummary{},
- Detailed: arrayCore.NewPhasesCompactArray(uint(currentState.GetExecutionArraySize())),
+ Detailed: arrayCore.NewPhasesCompactArray(uint(currentState.GetExecutionArraySize())), // #nosec G115
}
currentSubTaskPhaseHash, err := currentState.GetArrayStatus().HashCode()
@@ -126,7 +126,7 @@ func CheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionContext, job
}
}
- newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(actualPhase))
+ newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(actualPhase)) // #nosec G115
newArrayStatus.Summary.Inc(actualPhase)
parentState.RetryAttempts.SetItem(childIdx, bitarray.Item(len(subJob.Attempts)))
}
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/task_links.go b/flyteplugins/go/tasks/plugins/array/awsbatch/task_links.go
index caf2e51a38..64b64ac168 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/task_links.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/task_links.go
@@ -89,9 +89,9 @@ func GetTaskLinks(ctx context.Context, taskMeta pluginCore.TaskExecutionMetadata
externalResources = append(externalResources, &pluginCore.ExternalResource{
ExternalID: subJob.ID,
- Index: uint32(originalIndex),
+ Index: uint32(originalIndex), // #nosec G115
Logs: subTaskLogLinks,
- RetryAttempt: uint32(retryAttempt),
+ RetryAttempt: uint32(retryAttempt), // #nosec G115
Phase: finalPhase,
})
}
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/transformer.go b/flyteplugins/go/tasks/plugins/array/awsbatch/transformer.go
index 1eaef150d0..936269f2b1 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/transformer.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/transformer.go
@@ -109,9 +109,9 @@ func FlyteTaskToBatchInput(ctx context.Context, tCtx pluginCore.TaskExecutionCon
}
submitJobInput.SetJobName(tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName()).
SetJobDefinition(jobDefinition).SetJobQueue(jobConfig.DynamicTaskQueue).
- SetRetryStrategy(toRetryStrategy(ctx, toBackoffLimit(taskTemplate.Metadata), cfg.MinRetries, cfg.MaxRetries)).
+ SetRetryStrategy(toRetryStrategy(ctx, toBackoffLimit(taskTemplate.GetMetadata()), cfg.MinRetries, cfg.MaxRetries)).
SetContainerOverrides(toContainerOverrides(ctx, append(cmd, args...), &resources, envVars)).
- SetTimeout(toTimeout(taskTemplate.Metadata.GetTimeout(), cfg.DefaultTimeOut.Duration))
+ SetTimeout(toTimeout(taskTemplate.GetMetadata().GetTimeout(), cfg.DefaultTimeOut.Duration))
return submitJobInput, nil
}
@@ -159,7 +159,7 @@ func getEnvVarsForTask(ctx context.Context, execID pluginCore.TaskExecutionID, c
}
func toTimeout(templateTimeout *duration.Duration, defaultTimeout time.Duration) *batch.JobTimeout {
- if templateTimeout != nil && templateTimeout.Seconds > 0 {
+ if templateTimeout != nil && templateTimeout.GetSeconds() > 0 {
return (&batch.JobTimeout{}).SetAttemptDurationSeconds(templateTimeout.GetSeconds())
}
@@ -239,11 +239,11 @@ func toRetryStrategy(_ context.Context, backoffLimit *int32, minRetryAttempts, m
}
func toBackoffLimit(metadata *idlCore.TaskMetadata) *int32 {
- if metadata == nil || metadata.Retries == nil {
+ if metadata == nil || metadata.GetRetries() == nil {
return nil
}
- i := int32(metadata.Retries.Retries)
+ i := int32(metadata.GetRetries().GetRetries()) // #nosec G115
return &i
}
diff --git a/flyteplugins/go/tasks/plugins/array/awsbatch/transformer_test.go b/flyteplugins/go/tasks/plugins/array/awsbatch/transformer_test.go
index bbe8c88995..642493346e 100644
--- a/flyteplugins/go/tasks/plugins/array/awsbatch/transformer_test.go
+++ b/flyteplugins/go/tasks/plugins/array/awsbatch/transformer_test.go
@@ -198,7 +198,7 @@ func TestArrayJobToBatchInput(t *testing.T) {
batchInput, err := FlyteTaskToBatchInput(ctx, taskCtx, "", &config.Config{})
assert.NoError(t, err)
- batchInput = UpdateBatchInputForArray(ctx, batchInput, input.Size)
+ batchInput = UpdateBatchInputForArray(ctx, batchInput, input.GetSize())
assert.NotNil(t, batchInput)
assert.Equal(t, *expectedBatchInput, *batchInput)
diff --git a/flyteplugins/go/tasks/plugins/array/catalog.go b/flyteplugins/go/tasks/plugins/array/catalog.go
index d6bf5e8820..60b4b224ac 100644
--- a/flyteplugins/go/tasks/plugins/array/catalog.go
+++ b/flyteplugins/go/tasks/plugins/array/catalog.go
@@ -39,7 +39,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex
// Extract the custom plugin pb
var arrayJob *idlPlugins.ArrayJob
- if taskTemplate.Type == AwsBatchTaskType {
+ if taskTemplate.GetType() == AwsBatchTaskType {
arrayJob = &idlPlugins.ArrayJob{
Parallelism: 1,
Size: 1,
@@ -48,7 +48,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex
},
}
} else {
- arrayJob, err = arrayCore.ToArrayJob(taskTemplate.GetCustom(), taskTemplate.TaskTypeVersion)
+ arrayJob, err = arrayCore.ToArrayJob(taskTemplate.GetCustom(), taskTemplate.GetTaskTypeVersion())
}
if err != nil {
return state, err
@@ -58,9 +58,9 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex
var inputReaders []io.InputReader
// Save this in the state
- if taskTemplate.TaskTypeVersion == 0 {
- state = state.SetOriginalArraySize(arrayJob.Size)
- arrayJobSize = arrayJob.Size
+ if taskTemplate.GetTaskTypeVersion() == 0 {
+ state = state.SetOriginalArraySize(arrayJob.GetSize())
+ arrayJobSize = arrayJob.GetSize()
state = state.SetOriginalMinSuccesses(arrayJob.GetMinSuccesses())
// build input readers
@@ -77,15 +77,15 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex
// identify and validate the size of the array job
size := -1
var literalCollection *idlCore.LiteralCollection
- for _, literal := range inputs.Literals {
+ for _, literal := range inputs.GetLiterals() {
if literalCollection = literal.GetCollection(); literalCollection != nil {
// validate length of input list
- if size != -1 && size != len(literalCollection.Literals) {
+ if size != -1 && size != len(literalCollection.GetLiterals()) {
state = state.SetPhase(arrayCore.PhasePermanentFailure, 0).SetReason("all maptask input lists must be the same length")
return state, nil
}
- size = len(literalCollection.Literals)
+ size = len(literalCollection.GetLiterals())
}
}
@@ -106,7 +106,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex
arrayJobSize = int64(size)
// build input readers
- inputReaders = ConstructStaticInputReaders(tCtx.InputReader(), inputs.Literals, size)
+ inputReaders = ConstructStaticInputReaders(tCtx.InputReader(), inputs.GetLiterals(), size)
}
if arrayJobSize > maxArrayJobSize {
@@ -117,10 +117,10 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex
}
// If the task is not discoverable, then skip data catalog work and move directly to launch
- if taskTemplate.Metadata == nil || !taskTemplate.Metadata.Discoverable {
+ if taskTemplate.GetMetadata() == nil || !taskTemplate.GetMetadata().GetDiscoverable() {
logger.Infof(ctx, "Task is not discoverable, moving to launch phase...")
// Set an all set indexes to cache. This task won't try to write to catalog anyway.
- state = state.SetIndexesToCache(arrayCore.InvertBitSet(bitarray.NewBitSet(uint(arrayJobSize)), uint(arrayJobSize)))
+ state = state.SetIndexesToCache(arrayCore.InvertBitSet(bitarray.NewBitSet(uint(arrayJobSize)), uint(arrayJobSize))) // #nosec G115
state = state.SetPhase(arrayCore.PhasePreLaunch, core.DefaultPhaseVersion).SetReason("Task is not discoverable.")
state.SetExecutionArraySize(int(arrayJobSize))
@@ -165,7 +165,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex
// TODO: maybe add a config option to decide the behavior on catalog failure.
logger.Warnf(ctx, "Failing to lookup catalog. Will move on to launching the task. Error: %v", err)
- state = state.SetIndexesToCache(arrayCore.InvertBitSet(bitarray.NewBitSet(uint(arrayJobSize)), uint(arrayJobSize)))
+ state = state.SetIndexesToCache(arrayCore.InvertBitSet(bitarray.NewBitSet(uint(arrayJobSize)), uint(arrayJobSize))) // #nosec G115
state = state.SetExecutionArraySize(int(arrayJobSize))
state = state.SetPhase(arrayCore.PhasePreLaunch, core.DefaultPhaseVersion).SetReason(fmt.Sprintf("Skipping cache check due to err [%v]", err))
return state, nil
@@ -178,7 +178,7 @@ func DetermineDiscoverability(ctx context.Context, tCtx core.TaskExecutionContex
}
cachedResults := resp.GetCachedResults()
- state = state.SetIndexesToCache(arrayCore.InvertBitSet(cachedResults, uint(arrayJobSize)))
+ state = state.SetIndexesToCache(arrayCore.InvertBitSet(cachedResults, uint(arrayJobSize))) // #nosec G115
state = state.SetExecutionArraySize(int(arrayJobSize) - resp.GetCachedCount())
// If all the sub-tasks are actually done, then we can just move on.
@@ -223,14 +223,14 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state
return state, externalResources, errors.Errorf(errors.BadTaskSpecification, "Required value not set, taskTemplate is nil")
}
- if tMeta := taskTemplate.Metadata; tMeta == nil || !tMeta.Discoverable {
+ if tMeta := taskTemplate.GetMetadata(); tMeta == nil || !tMeta.GetDiscoverable() {
logger.Debugf(ctx, "Task is not marked as discoverable. Moving to [%v] phase.", phaseOnSuccess)
return state.SetPhase(phaseOnSuccess, versionOnSuccess).SetReason("Task is not discoverable."), externalResources, nil
}
var inputReaders []io.InputReader
arrayJobSize := int(state.GetOriginalArraySize())
- if taskTemplate.TaskTypeVersion == 0 {
+ if taskTemplate.GetTaskTypeVersion() == 0 {
// input readers
inputReaders, err = ConstructRemoteFileInputReaders(ctx, tCtx.DataStore(), tCtx.InputReader().GetInputPrefixPath(), arrayJobSize)
if err != nil {
@@ -242,7 +242,7 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state
return state, externalResources, errors.Errorf(errors.MetadataAccessFailed, "Could not read inputs and therefore failed to determine array job size")
}
- inputReaders = ConstructStaticInputReaders(tCtx.InputReader(), inputs.Literals, arrayJobSize)
+ inputReaders = ConstructStaticInputReaders(tCtx.InputReader(), inputs.GetLiterals(), arrayJobSize)
}
// output reader
@@ -251,8 +251,8 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state
return nil, externalResources, err
}
- iface := *taskTemplate.Interface
- iface.Outputs = makeSingularTaskInterface(iface.Outputs)
+ iface := taskTemplate.GetInterface()
+ iface.Outputs = makeSingularTaskInterface(iface.GetOutputs())
// Do not cache failed tasks. Retrieve the final phase from array status and unset the non-successful ones.
@@ -262,14 +262,15 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state
if !phase.IsSuccess() {
// tasksToCache is built on the originalArraySize and ArrayStatus.Detailed is the executionArraySize
originalIdx := arrayCore.CalculateOriginalIndex(idx, state.GetIndexesToCache())
- tasksToCache.Clear(uint(originalIdx))
+ tasksToCache.Clear(uint(originalIdx)) // #nosec G115
}
}
// Create catalog put items, but only put the ones that were not originally cached (as read from the catalog results bitset)
- catalogWriterItems, err := ConstructCatalogUploadRequests(*tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID().TaskId,
- tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID(), taskTemplate.Metadata.DiscoveryVersion,
- taskTemplate.Metadata.CacheIgnoreInputVars, iface, &tasksToCache, inputReaders, outputReaders)
+ //nolint:protogetter
+ catalogWriterItems, err := ConstructCatalogUploadRequests(tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID().TaskId,
+ tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID(), taskTemplate.GetMetadata().GetDiscoveryVersion(),
+ taskTemplate.GetMetadata().GetCacheIgnoreInputVars(), iface, &tasksToCache, inputReaders, outputReaders)
if err != nil {
return nil, externalResources, err
@@ -292,6 +293,7 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state
externalResources = make([]*core.ExternalResource, 0)
for idx, phaseIdx := range state.ArrayStatus.Detailed.GetItems() {
originalIdx := arrayCore.CalculateOriginalIndex(idx, state.GetIndexesToCache())
+ // #nosec G115
if !tasksToCache.IsSet(uint(originalIdx)) {
continue
}
@@ -299,8 +301,8 @@ func WriteToDiscovery(ctx context.Context, tCtx core.TaskExecutionContext, state
externalResources = append(externalResources,
&core.ExternalResource{
CacheStatus: idlCore.CatalogCacheStatus_CACHE_POPULATED,
- Index: uint32(originalIdx),
- RetryAttempt: uint32(state.RetryAttempts.GetItem(idx)),
+ Index: uint32(originalIdx), // #nosec G115
+ RetryAttempt: uint32(state.RetryAttempts.GetItem(idx)), // #nosec G115
Phase: core.Phases[phaseIdx],
},
)
@@ -337,8 +339,8 @@ func WriteToCatalog(ctx context.Context, ownerSignal core.SignalAsync, catalogCl
return false, nil
}
-func ConstructCatalogUploadRequests(keyID idlCore.Identifier, taskExecID idlCore.TaskExecutionIdentifier,
- cacheVersion string, cacheIgnoreInputVars []string, taskInterface idlCore.TypedInterface, whichTasksToCache *bitarray.BitSet,
+func ConstructCatalogUploadRequests(keyID *idlCore.Identifier, taskExecID idlCore.TaskExecutionIdentifier,
+ cacheVersion string, cacheIgnoreInputVars []string, taskInterface *idlCore.TypedInterface, whichTasksToCache *bitarray.BitSet,
inputReaders []io.InputReader, outputReaders []io.OutputReader) ([]catalog.UploadRequest, error) {
writerWorkItems := make([]catalog.UploadRequest, 0, len(inputReaders))
@@ -349,17 +351,18 @@ func ConstructCatalogUploadRequests(keyID idlCore.Identifier, taskExecID idlCore
}
for idx, input := range inputReaders {
+ // #nosec G115
if !whichTasksToCache.IsSet(uint(idx)) {
continue
}
wi := catalog.UploadRequest{
Key: catalog.Key{
- Identifier: keyID,
+ Identifier: *keyID,
InputReader: input,
CacheVersion: cacheVersion,
CacheIgnoreInputVars: cacheIgnoreInputVars,
- TypedInterface: taskInterface,
+ TypedInterface: *taskInterface,
},
ArtifactData: outputReaders[idx],
ArtifactMetadata: catalog.Metadata{
@@ -400,6 +403,7 @@ func NewLiteralScalarOfInteger(number int64) *idlCore.Literal {
func CatalogBitsetToLiteralCollection(catalogResults *bitarray.BitSet, size int) *idlCore.LiteralCollection {
literals := make([]*idlCore.Literal, 0, size)
for i := 0; i < size; i++ {
+ // #nosec G115
if !catalogResults.IsSet(uint(i)) {
literals = append(literals, NewLiteralScalarOfInteger(int64(i)))
}
@@ -410,15 +414,15 @@ func CatalogBitsetToLiteralCollection(catalogResults *bitarray.BitSet, size int)
}
func makeSingularTaskInterface(varMap *idlCore.VariableMap) *idlCore.VariableMap {
- if varMap == nil || len(varMap.Variables) == 0 {
+ if varMap == nil || len(varMap.GetVariables()) == 0 {
return varMap
}
res := &idlCore.VariableMap{
- Variables: make(map[string]*idlCore.Variable, len(varMap.Variables)),
+ Variables: make(map[string]*idlCore.Variable, len(varMap.GetVariables())),
}
- for key, val := range varMap.Variables {
+ for key, val := range varMap.GetVariables() {
if val.GetType().GetCollectionType() != nil {
res.Variables[key] = &idlCore.Variable{Type: val.GetType().GetCollectionType()}
} else {
@@ -440,17 +444,17 @@ func ConstructCatalogReaderWorkItems(ctx context.Context, taskReader core.TaskRe
workItems := make([]catalog.DownloadRequest, 0, len(inputs))
- iface := *t.Interface
- iface.Outputs = makeSingularTaskInterface(iface.Outputs)
+ iface := t.GetInterface()
+ iface.Outputs = makeSingularTaskInterface(iface.GetOutputs())
for idx, inputReader := range inputs {
// TODO: Check if Identifier or Interface are empty and return err
item := catalog.DownloadRequest{
Key: catalog.Key{
- Identifier: *t.Id,
- CacheVersion: t.GetMetadata().DiscoveryVersion,
+ Identifier: *t.Id, //nolint:protogetter
+ CacheVersion: t.GetMetadata().GetDiscoveryVersion(),
InputReader: inputReader,
- TypedInterface: iface,
+ TypedInterface: *iface,
},
Target: outputs[idx],
}
@@ -471,7 +475,7 @@ func ConstructStaticInputReaders(inputPaths io.InputFilePaths, inputLiterals map
for inputName, inputLiteral := range inputLiterals {
if literalCollection = inputLiteral.GetCollection(); literalCollection != nil {
// if literal is a collection then we need to retrieve the specific literal for this subtask index
- literals[inputName] = literalCollection.Literals[i]
+ literals[inputName] = literalCollection.GetLiterals()[i]
} else {
literals[inputName] = inputLiteral
}
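
Note on the `// #nosec G115` annotations added throughout this file: they suppress gosec's G115 check ("integer overflow conversion"), which starts being reported after the golangci-lint upgrade to v1.61. The array sizes and bitset indexes here are already bounded, so the conversions are suppressed rather than guarded. As a minimal sketch of the failure mode G115 warns about (the `toUint32` helper below is illustrative, not part of this codebase):

```go
package main

import (
	"fmt"
	"math"
)

// toUint32 shows the checked alternative to a blind conversion: reject values
// that cannot be represented instead of letting them silently wrap.
func toUint32(n int64) (uint32, error) {
	if n < 0 || n > math.MaxUint32 {
		return 0, fmt.Errorf("value %d does not fit in uint32", n)
	}
	return uint32(n), nil
}

func main() {
	idx := int64(-1)

	// A blind conversion wraps a negative index to a huge unsigned value.
	fmt.Println(uint32(idx)) // 4294967295

	// The checked conversion surfaces the problem instead.
	if _, err := toUint32(idx); err != nil {
		fmt.Println("rejected:", err)
	}
}
```

Where the converted value is provably in range, as with these indexes, a `// #nosec G115` keeps the code unchanged while recording that the conversion was reviewed.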
diff --git a/flyteplugins/go/tasks/plugins/array/catalog_test.go b/flyteplugins/go/tasks/plugins/array/catalog_test.go
index 15a36a4dcf..296d2283d4 100644
--- a/flyteplugins/go/tasks/plugins/array/catalog_test.go
+++ b/flyteplugins/go/tasks/plugins/array/catalog_test.go
@@ -102,19 +102,19 @@ var (
func TestNewLiteralScalarOfInteger(t *testing.T) {
l := NewLiteralScalarOfInteger(int64(65))
- assert.Equal(t, int64(65), l.Value.(*core.Literal_Scalar).Scalar.Value.(*core.Scalar_Primitive).
- Primitive.Value.(*core.Primitive_Integer).Integer)
+ assert.Equal(t, int64(65), l.GetValue().(*core.Literal_Scalar).Scalar.GetValue().(*core.Scalar_Primitive).
+ Primitive.GetValue().(*core.Primitive_Integer).Integer)
}
func TestCatalogBitsetToLiteralCollection(t *testing.T) {
ba := bitarray.NewBitSet(3)
ba.Set(1)
lc := CatalogBitsetToLiteralCollection(ba, 3)
- assert.Equal(t, 2, len(lc.Literals))
- assert.Equal(t, int64(0), lc.Literals[0].Value.(*core.Literal_Scalar).Scalar.Value.(*core.Scalar_Primitive).
- Primitive.Value.(*core.Primitive_Integer).Integer)
- assert.Equal(t, int64(2), lc.Literals[1].Value.(*core.Literal_Scalar).Scalar.Value.(*core.Scalar_Primitive).
- Primitive.Value.(*core.Primitive_Integer).Integer)
+ assert.Equal(t, 2, len(lc.GetLiterals()))
+ assert.Equal(t, int64(0), lc.GetLiterals()[0].GetValue().(*core.Literal_Scalar).Scalar.GetValue().(*core.Scalar_Primitive).
+ Primitive.GetValue().(*core.Primitive_Integer).Integer)
+ assert.Equal(t, int64(2), lc.GetLiterals()[1].GetValue().(*core.Literal_Scalar).Scalar.GetValue().(*core.Scalar_Primitive).
+ Primitive.GetValue().(*core.Primitive_Integer).Integer)
}
func runDetermineDiscoverabilityTest(t testing.TB, taskTemplate *core.TaskTemplate, future catalog.DownloadFuture,
diff --git a/flyteplugins/go/tasks/plugins/array/core/metadata.go b/flyteplugins/go/tasks/plugins/array/core/metadata.go
index 4ac7c71b4c..fcbaa3456d 100644
--- a/flyteplugins/go/tasks/plugins/array/core/metadata.go
+++ b/flyteplugins/go/tasks/plugins/array/core/metadata.go
@@ -29,10 +29,11 @@ func InitializeExternalResources(ctx context.Context, tCtx core.TaskExecutionCon
var childIndex int
var phase core.Phase
+ // #nosec G115
if state.IndexesToCache.IsSet(uint(i)) {
// if not cached set to PhaseUndefined and set cacheStatus according to Discoverable
phase = core.PhaseUndefined
- if taskTemplate.Metadata == nil || !taskTemplate.Metadata.Discoverable {
+ if taskTemplate.GetMetadata() == nil || !taskTemplate.GetMetadata().GetDiscoverable() {
cacheStatus = idlCore.CatalogCacheStatus_CACHE_DISABLED
} else {
cacheStatus = idlCore.CatalogCacheStatus_CACHE_MISS
@@ -54,7 +55,7 @@ func InitializeExternalResources(ctx context.Context, tCtx core.TaskExecutionCon
externalResources[i] = &core.ExternalResource{
ExternalID: subTaskID,
CacheStatus: cacheStatus,
- Index: uint32(i),
+ Index: uint32(i), // #nosec G115
Logs: nil,
RetryAttempt: 0,
Phase: phase,
diff --git a/flyteplugins/go/tasks/plugins/array/core/metadata_test.go b/flyteplugins/go/tasks/plugins/array/core/metadata_test.go
index 262bd3b822..370af258c7 100644
--- a/flyteplugins/go/tasks/plugins/array/core/metadata_test.go
+++ b/flyteplugins/go/tasks/plugins/array/core/metadata_test.go
@@ -17,9 +17,9 @@ func TestInitializeExternalResources(t *testing.T) {
subTaskCount := 10
cachedCount := 4
- indexesToCache := InvertBitSet(bitarray.NewBitSet(uint(subTaskCount)), uint(subTaskCount))
+ indexesToCache := InvertBitSet(bitarray.NewBitSet(uint(subTaskCount)), uint(subTaskCount)) // #nosec G115
for i := 0; i < cachedCount; i++ {
- indexesToCache.Clear(uint(i))
+ indexesToCache.Clear(uint(i)) // #nosec G115
}
tr := &mocks.TaskReader{}
@@ -54,7 +54,7 @@ func TestInitializeExternalResources(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, subTaskCount, len(externalResources))
for i, externalResource := range externalResources {
- assert.Equal(t, uint32(i), externalResource.Index)
+ assert.Equal(t, uint32(i), externalResource.Index) // #nosec G115
assert.Equal(t, 0, len(externalResource.Logs))
assert.Equal(t, uint32(0), externalResource.RetryAttempt)
if i < cachedCount {
diff --git a/flyteplugins/go/tasks/plugins/array/core/state.go b/flyteplugins/go/tasks/plugins/array/core/state.go
index a540359b0a..8fcc85946b 100644
--- a/flyteplugins/go/tasks/plugins/array/core/state.go
+++ b/flyteplugins/go/tasks/plugins/array/core/state.go
@@ -303,7 +303,7 @@ func InvertBitSet(input *bitarray.BitSet, limit uint) *bitarray.BitSet {
func NewPhasesCompactArray(count uint) bitarray.CompactArray {
// TODO: This is fragile, we should introduce a TaskPhaseCount as the last element in the enum
- a, err := bitarray.NewCompactArray(count, bitarray.Item(len(core.Phases)-1))
+ a, err := bitarray.NewCompactArray(count, bitarray.Item(len(core.Phases)-1)) // #nosec G115
if err != nil {
logger.Warnf(context.Background(), "Failed to create compact array with provided parameters [count: %v]",
count)
@@ -322,7 +322,7 @@ func CalculateOriginalIndex(childIdx int, toCache *bitarray.BitSet) int {
}
if childIdx+1 == sum {
- return int(i)
+ return int(i) // #nosec G115
}
}
diff --git a/flyteplugins/go/tasks/plugins/array/core/state_test.go b/flyteplugins/go/tasks/plugins/array/core/state_test.go
index 969c98df20..84ac17d315 100644
--- a/flyteplugins/go/tasks/plugins/array/core/state_test.go
+++ b/flyteplugins/go/tasks/plugins/array/core/state_test.go
@@ -27,7 +27,7 @@ func TestInvertBitSet(t *testing.T) {
assertBitSetsEqual(t, expected, actual, 4)
}
-func assertBitSetsEqual(t testing.TB, b1, b2 *bitarray.BitSet, len int) {
+func assertBitSetsEqual(t testing.TB, b1, b2 *bitarray.BitSet, len uint) {
if b1 == nil {
assert.Nil(t, b2)
} else if b2 == nil {
@@ -35,7 +35,7 @@ func assertBitSetsEqual(t testing.TB, b1, b2 *bitarray.BitSet, len int) {
}
assert.Equal(t, b1.Cap(), b2.Cap())
- for i := uint(0); i < uint(len); i++ {
+ for i := uint(0); i < len; i++ {
assert.Equal(t, b1.IsSet(i), b2.IsSet(i), "At index %v", i)
}
}
@@ -43,11 +43,11 @@ func assertBitSetsEqual(t testing.TB, b1, b2 *bitarray.BitSet, len int) {
func TestMapArrayStateToPluginPhase(t *testing.T) {
ctx := context.Background()
- subTaskCount := 3
+ subTaskCount := uint(3)
- detailedArray := NewPhasesCompactArray(uint(subTaskCount))
- indexesToCache := InvertBitSet(bitarray.NewBitSet(uint(subTaskCount)), uint(subTaskCount))
- retryAttemptsArray, err := bitarray.NewCompactArray(uint(subTaskCount), bitarray.Item(1))
+ detailedArray := NewPhasesCompactArray(subTaskCount)
+ indexesToCache := InvertBitSet(bitarray.NewBitSet(subTaskCount), subTaskCount)
+ retryAttemptsArray, err := bitarray.NewCompactArray(subTaskCount, bitarray.Item(1))
assert.NoError(t, err)
t.Run("start", func(t *testing.T) {
diff --git a/flyteplugins/go/tasks/plugins/array/inputs.go b/flyteplugins/go/tasks/plugins/array/inputs.go
index e0a7035181..8e4e746530 100644
--- a/flyteplugins/go/tasks/plugins/array/inputs.go
+++ b/flyteplugins/go/tasks/plugins/array/inputs.go
@@ -20,7 +20,7 @@ func (i arrayJobInputReader) GetInputPath() storage.DataReference {
}
func GetInputReader(tCtx core.TaskExecutionContext, taskTemplate *idlCore.TaskTemplate) io.InputReader {
- if taskTemplate.GetTaskTypeVersion() == 0 && taskTemplate.Type != AwsBatchTaskType {
+ if taskTemplate.GetTaskTypeVersion() == 0 && taskTemplate.GetType() != AwsBatchTaskType {
// Prior to task type version == 1, dynamic type tasks (including array tasks) would write input files for each
// individual array task instance. In this case we use a modified input reader to only pass in the parent input
// directory.
diff --git a/flyteplugins/go/tasks/plugins/array/k8s/management.go b/flyteplugins/go/tasks/plugins/array/k8s/management.go
index 12eea118cc..e64c3e601a 100644
--- a/flyteplugins/go/tasks/plugins/array/k8s/management.go
+++ b/flyteplugins/go/tasks/plugins/array/k8s/management.go
@@ -69,7 +69,7 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon
messageCollector := errorcollector.NewErrorMessageCollector()
newArrayStatus := &arraystatus.ArrayStatus{
Summary: arraystatus.ArraySummary{},
- Detailed: arrayCore.NewPhasesCompactArray(uint(currentState.GetExecutionArraySize())),
+ Detailed: arrayCore.NewPhasesCompactArray(uint(currentState.GetExecutionArraySize())), // #nosec G115
}
externalResources = make([]*core.ExternalResource, 0, len(currentState.GetArrayStatus().Detailed.GetItems()))
@@ -82,7 +82,7 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon
// If the current State is newly minted then we must initialize RetryAttempts to track how many
// times each subtask is executed.
if len(currentState.RetryAttempts.GetItems()) == 0 {
- count := uint(currentState.GetExecutionArraySize())
+ count := uint(currentState.GetExecutionArraySize()) // #nosec G115
maxValue := bitarray.Item(tCtx.TaskExecutionMetadata().GetMaxAttempts())
retryAttemptsArray, err := bitarray.NewCompactArray(count, maxValue)
@@ -104,7 +104,7 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon
// times the subtask failed due to system issues, this is necessary to correctly evaluate
// interruptible subtasks.
if len(currentState.SystemFailures.GetItems()) == 0 {
- count := uint(currentState.GetExecutionArraySize())
+ count := uint(currentState.GetExecutionArraySize()) // #nosec G115
maxValue := bitarray.Item(tCtx.TaskExecutionMetadata().GetMaxAttempts())
systemFailuresArray, err := bitarray.NewCompactArray(count, maxValue)
@@ -134,13 +134,13 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon
return currentState, externalResources, errors.Errorf(errors.BadTaskSpecification, "Required value not set, taskTemplate is nil")
}
- arrayJob, err := arrayCore.ToArrayJob(taskTemplate.GetCustom(), taskTemplate.TaskTypeVersion)
+ arrayJob, err := arrayCore.ToArrayJob(taskTemplate.GetCustom(), taskTemplate.GetTaskTypeVersion())
if err != nil {
return currentState, externalResources, err
}
currentParallelism := 0
- maxParallelism := int(arrayJob.Parallelism)
+ maxParallelism := int(arrayJob.GetParallelism())
currentSubTaskPhaseHash, err := currentState.GetArrayStatus().HashCode()
if err != nil {
@@ -155,7 +155,7 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon
retryAttempt++
newState.RetryAttempts.SetItem(childIdx, retryAttempt)
} else if existingPhase.IsTerminal() {
- newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(existingPhase))
+ newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(existingPhase)) // #nosec G115
continue
}
@@ -246,12 +246,13 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon
}
}
+ // #nosec G115
if actualPhase == core.PhaseRetryableFailure && uint32(retryAttempt+1) >= stCtx.TaskExecutionMetadata().GetMaxAttempts() {
// If we see a retryable failure we must check if the number of retries exceeds the maximum
// attempts. If so, transition to a permanent failure so that it is not attempted again.
actualPhase = core.PhasePermanentFailure
}
- newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(actualPhase))
+ newArrayStatus.Detailed.SetItem(childIdx, bitarray.Item(actualPhase)) // #nosec G115
if actualPhase.IsTerminal() {
err = deallocateResource(ctx, stCtx, config, podName)
@@ -275,9 +276,9 @@ func LaunchAndCheckSubTasksState(ctx context.Context, tCtx core.TaskExecutionCon
externalResources = append(externalResources, &core.ExternalResource{
ExternalID: podName,
- Index: uint32(originalIdx),
+ Index: uint32(originalIdx), // #nosec G115
Logs: logLinks,
- RetryAttempt: uint32(retryAttempt),
+ RetryAttempt: uint32(retryAttempt), // #nosec G115
Phase: actualPhase,
})
@@ -383,15 +384,15 @@ func TerminateSubTasks(ctx context.Context, tCtx core.TaskExecutionContext, kube
} else {
externalResources = append(externalResources, &core.ExternalResource{
ExternalID: stCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(),
- Index: uint32(originalIdx),
- RetryAttempt: uint32(retryAttempt),
+ Index: uint32(originalIdx), // #nosec G115
+ RetryAttempt: uint32(retryAttempt), // #nosec G115
Phase: core.PhaseAborted,
})
}
}
if messageCollector.Length() > 0 {
- return currentState, externalResources, fmt.Errorf(messageCollector.Summary(config.MaxErrorStringLength))
+ return currentState, externalResources, fmt.Errorf(messageCollector.Summary(config.MaxErrorStringLength)) //nolint
}
return currentState.SetPhase(arrayCore.PhaseWriteToDiscoveryThenFail, currentState.PhaseVersion+1), externalResources, nil
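
The bare `//nolint` added to the `fmt.Errorf(messageCollector.Summary(...))` call above silences the lint complaint about a non-constant format string: if the aggregated summary ever contains a `%` character, `fmt.Errorf` will try to interpret it as a verb. A small illustrative sketch of the issue and the usual alternatives:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// An aggregated error summary that happens to contain a % character.
	summary := "subtask failed: disk 100% full"

	// Used directly as the format string, the % is parsed as a verb and the
	// output is mangled (roughly "disk 100%!f(MISSING)ull").
	mangled := fmt.Errorf(summary)

	// Safer spellings keep the text intact.
	viaNew := errors.New(summary)
	viaVerb := fmt.Errorf("%s", summary)

	fmt.Println(mangled)
	fmt.Println(viaNew)
	fmt.Println(viaVerb)
}
```

Suppressing the warning preserves the existing behaviour; `errors.New` or `fmt.Errorf("%s", ...)` would be the more defensive fix if the summary can contain arbitrary text.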
diff --git a/flyteplugins/go/tasks/plugins/array/k8s/management_test.go b/flyteplugins/go/tasks/plugins/array/k8s/management_test.go
index 7100fbc34c..d1628f98a2 100644
--- a/flyteplugins/go/tasks/plugins/array/k8s/management_test.go
+++ b/flyteplugins/go/tasks/plugins/array/k8s/management_test.go
@@ -217,8 +217,10 @@ func TestCheckSubTasksState(t *testing.T) {
OriginalArraySize: int64(subtaskCount),
OriginalMinSuccesses: int64(subtaskCount),
ArrayStatus: arraystatus.ArrayStatus{
+ // #nosec G115
Detailed: arrayCore.NewPhasesCompactArray(uint(subtaskCount)), // set all tasks to core.PhaseUndefined
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached
}
@@ -254,8 +256,10 @@ func TestCheckSubTasksState(t *testing.T) {
OriginalArraySize: int64(subtaskCount),
OriginalMinSuccesses: int64(subtaskCount),
ArrayStatus: arraystatus.ArrayStatus{
+ // #nosec G115
Detailed: arrayCore.NewPhasesCompactArray(uint(subtaskCount)), // set all tasks to core.PhaseUndefined
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached
}
@@ -296,8 +300,10 @@ func TestCheckSubTasksState(t *testing.T) {
OriginalArraySize: int64(subtaskCount),
OriginalMinSuccesses: int64(subtaskCount),
ArrayStatus: arraystatus.ArrayStatus{
+ // #nosec G115
Detailed: arrayCore.NewPhasesCompactArray(uint(subtaskCount)), // set all tasks to core.PhaseUndefined
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached
}
@@ -342,12 +348,12 @@ func TestCheckSubTasksState(t *testing.T) {
tCtx := getMockTaskExecutionContext(ctx, 0)
tCtx.OnResourceManager().Return(&resourceManager)
- detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount))
+ detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) // #nosec G115
for i := 0; i < subtaskCount; i++ {
detailed.SetItem(i, bitarray.Item(core.PhaseRetryableFailure)) // set all tasks to core.PhaseRetryableFailure
}
- retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1))
+ retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1)) // #nosec G115
assert.NoError(t, err)
currentState := &arrayCore.State{
@@ -358,6 +364,7 @@ func TestCheckSubTasksState(t *testing.T) {
ArrayStatus: arraystatus.ArrayStatus{
Detailed: detailed,
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached
RetryAttempts: retryAttemptsArray,
}
@@ -411,8 +418,9 @@ func TestCheckSubTasksState(t *testing.T) {
tCtx := getMockTaskExecutionContext(ctx, 0)
tCtx.OnResourceManager().Return(&resourceManager)
- detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount))
+ detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) // #nosec G115
for i := 0; i < subtaskCount; i++ {
+ // #nosec G115
detailed.SetItem(i, bitarray.Item(core.PhaseRunning)) // set all tasks to core.PhaseRunning
}
@@ -424,6 +432,7 @@ func TestCheckSubTasksState(t *testing.T) {
ArrayStatus: arraystatus.ArrayStatus{
Detailed: detailed,
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached
}
@@ -445,10 +454,10 @@ func TestCheckSubTasksState(t *testing.T) {
logLinks := externalResource.Logs
assert.Equal(t, 2, len(logLinks))
- assert.Equal(t, fmt.Sprintf("Kubernetes Logs #0-%d", i), logLinks[0].Name)
- assert.Equal(t, fmt.Sprintf("k8s/log/a-n-b/notfound-%d/pod?namespace=a-n-b", i), logLinks[0].Uri)
- assert.Equal(t, fmt.Sprintf("Cloudwatch Logs #0-%d", i), logLinks[1].Name)
- assert.Equal(t, fmt.Sprintf("https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.notfound-%d;streamFilter=typeLogStreamPrefix", i), logLinks[1].Uri)
+ assert.Equal(t, fmt.Sprintf("Kubernetes Logs #0-%d", i), logLinks[0].GetName())
+ assert.Equal(t, fmt.Sprintf("k8s/log/a-n-b/notfound-%d/pod?namespace=a-n-b", i), logLinks[0].GetUri())
+ assert.Equal(t, fmt.Sprintf("Cloudwatch Logs #0-%d", i), logLinks[1].GetName())
+ assert.Equal(t, fmt.Sprintf("https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.notfound-%d;streamFilter=typeLogStreamPrefix", i), logLinks[1].GetUri())
}
})
@@ -464,12 +473,13 @@ func TestCheckSubTasksState(t *testing.T) {
tCtx := getMockTaskExecutionContext(ctx, 0)
tCtx.OnResourceManager().Return(&resourceManager)
- detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount))
+ detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) // #nosec G115
for i := 0; i < subtaskCount; i++ {
+ // #nosec G115
detailed.SetItem(i, bitarray.Item(core.PhaseRunning)) // set all tasks to core.PhaseRunning
}
- retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1))
+ retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1)) // #nosec G115
assert.NoError(t, err)
currentState := &arrayCore.State{
@@ -480,6 +490,7 @@ func TestCheckSubTasksState(t *testing.T) {
ArrayStatus: arraystatus.ArrayStatus{
Detailed: detailed,
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached
RetryAttempts: retryAttemptsArray,
}
@@ -509,11 +520,13 @@ func TestCheckSubTasksState(t *testing.T) {
tCtx := getMockTaskExecutionContext(ctx, 0)
tCtx.OnResourceManager().Return(&resourceManager)
+ // #nosec G115
detailed := arrayCore.NewPhasesCompactArray(uint(subtaskCount))
for i := 0; i < subtaskCount; i++ {
detailed.SetItem(i, bitarray.Item(core.PhaseRunning)) // set all tasks to core.PhaseRunning
}
+ // #nosec G115
retryAttemptsArray, err := bitarray.NewCompactArray(uint(subtaskCount), bitarray.Item(1))
assert.NoError(t, err)
@@ -529,6 +542,7 @@ func TestCheckSubTasksState(t *testing.T) {
ArrayStatus: arraystatus.ArrayStatus{
Detailed: detailed,
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)), // set all tasks to be cached
RetryAttempts: retryAttemptsArray,
}
@@ -561,7 +575,7 @@ func TestTerminateSubTasksOnAbort(t *testing.T) {
kubeClient.OnGetClient().Return(mocks.NewFakeKubeClient())
kubeClient.OnGetCache().Return(mocks.NewFakeKubeCache())
- compactArray := arrayCore.NewPhasesCompactArray(uint(subtaskCount))
+ compactArray := arrayCore.NewPhasesCompactArray(uint(subtaskCount)) // #nosec G115
for i := 0; i < subtaskCount; i++ {
compactArray.SetItem(i, 5)
}
@@ -574,6 +588,7 @@ func TestTerminateSubTasksOnAbort(t *testing.T) {
ArrayStatus: arraystatus.ArrayStatus{
Detailed: compactArray,
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)),
}
@@ -652,9 +667,10 @@ func TestTerminateSubTasks(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
+ // #nosec G115
compactArray := arrayCore.NewPhasesCompactArray(uint(subtaskCount))
for i, phaseIdx := range test.initialPhaseIndices {
- compactArray.SetItem(i, bitarray.Item(phaseIdx))
+ compactArray.SetItem(i, bitarray.Item(phaseIdx)) // #nosec G115
}
currentState := &arrayCore.State{
CurrentPhase: arrayCore.PhaseCheckingSubTaskExecutions,
@@ -665,6 +681,7 @@ func TestTerminateSubTasks(t *testing.T) {
ArrayStatus: arraystatus.ArrayStatus{
Detailed: compactArray,
},
+ // #nosec G115
IndexesToCache: arrayCore.InvertBitSet(bitarray.NewBitSet(uint(subtaskCount)), uint(subtaskCount)),
}
diff --git a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go
index b76fe70d28..d0e483257d 100644
--- a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go
+++ b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go
@@ -192,7 +192,7 @@ func (s SubTaskExecutionID) TemplateVarsByScheme() []tasklog.TemplateVar {
{Regex: LogTemplateRegexes.ParentName, Value: s.parentName},
{
Regex: LogTemplateRegexes.ExecutionIndex,
- Value: strconv.FormatUint(uint64(s.executionIndex), 10),
+ Value: strconv.FormatUint(uint64(s.executionIndex), 10), // #nosec G115
},
{
Regex: LogTemplateRegexes.RetryAttempt,
@@ -212,7 +212,7 @@ func NewSubTaskExecutionID(taskExecutionID pluginsCore.TaskExecutionID, executio
executionIndex,
taskExecutionID.GetGeneratedName(),
retryAttempt,
- taskExecutionID.GetID().RetryAttempt,
+ taskExecutionID.GetID().RetryAttempt, //nolint:protogetter
}
}
@@ -252,8 +252,8 @@ func NewSubTaskExecutionMetadata(taskExecutionMetadata pluginsCore.TaskExecution
var err error
secretsMap := make(map[string]string)
injectSecretsLabel := make(map[string]string)
- if taskTemplate.SecurityContext != nil && len(taskTemplate.SecurityContext.Secrets) > 0 {
- secretsMap, err = secrets.MarshalSecretsToMapStrings(taskTemplate.SecurityContext.Secrets)
+ if taskTemplate.GetSecurityContext() != nil && len(taskTemplate.GetSecurityContext().GetSecrets()) > 0 {
+ secretsMap, err = secrets.MarshalSecretsToMapStrings(taskTemplate.GetSecurityContext().GetSecrets())
if err != nil {
return SubTaskExecutionMetadata{}, err
}
@@ -264,6 +264,7 @@ func NewSubTaskExecutionMetadata(taskExecutionMetadata pluginsCore.TaskExecution
}
subTaskExecutionID := NewSubTaskExecutionID(taskExecutionMetadata.GetTaskExecutionID(), executionIndex, retryAttempt)
+ // #nosec G115
interruptible := taskExecutionMetadata.IsInterruptible() && int32(systemFailures) < taskExecutionMetadata.GetInterruptibleFailureThreshold()
return SubTaskExecutionMetadata{
taskExecutionMetadata,
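
Most of the mechanical edits in this file and the surrounding plugins replace direct protobuf field access (`taskTemplate.SecurityContext.Secrets`, `taskExecutionID.GetID().RetryAttempt`) with the generated `GetX()` accessors that the protogetter linter asks for. Those accessors are nil-receiver safe, so a chain of getters degrades to zero values instead of panicking when an optional message is absent. A small illustration using a hand-written stand-in for a generated message (the real types come from protoc-gen-go):

```go
package main

import "fmt"

// SecurityContext stands in for a generated protobuf message; only what the
// illustration needs is included.
type SecurityContext struct {
	Secrets []string
}

// GetSecrets mirrors the nil-safe accessor style protoc-gen-go emits.
func (s *SecurityContext) GetSecrets() []string {
	if s == nil {
		return nil
	}
	return s.Secrets
}

type TaskTemplate struct {
	SecurityContext *SecurityContext
}

func (t *TaskTemplate) GetSecurityContext() *SecurityContext {
	if t == nil {
		return nil
	}
	return t.SecurityContext
}

func main() {
	var tmpl *TaskTemplate // nil, as when no security context was ever set

	// Direct field access (tmpl.SecurityContext.Secrets) would panic here;
	// the getter chain simply yields an empty slice.
	fmt.Println(len(tmpl.GetSecurityContext().GetSecrets())) // 0
}
```

The explicit `//nolint:protogetter` exceptions mark the few places where direct field access is retained deliberately.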
diff --git a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go
index a7f5aa20b4..83aead4f5e 100644
--- a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go
+++ b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go
@@ -31,8 +31,8 @@ func TestSubTaskExecutionContext(t *testing.T) {
subtaskTemplate, err := stCtx.TaskReader().Read(ctx)
assert.Nil(t, err)
- assert.Equal(t, int32(2), subtaskTemplate.TaskTypeVersion)
- assert.Equal(t, podPlugin.ContainerTaskType, subtaskTemplate.Type)
+ assert.Equal(t, int32(2), subtaskTemplate.GetTaskTypeVersion())
+ assert.Equal(t, podPlugin.ContainerTaskType, subtaskTemplate.GetType())
assert.Equal(t, storage.DataReference("/prefix/"), stCtx.OutputWriter().GetOutputPrefixPath())
assert.Equal(t, storage.DataReference("/raw_prefix/5/1"), stCtx.OutputWriter().GetRawOutputPrefix())
assert.Equal(t,
diff --git a/flyteplugins/go/tasks/plugins/array/outputs.go b/flyteplugins/go/tasks/plugins/array/outputs.go
index cb07fb0de1..611442de98 100644
--- a/flyteplugins/go/tasks/plugins/array/outputs.go
+++ b/flyteplugins/go/tasks/plugins/array/outputs.go
@@ -52,6 +52,7 @@ type assembleOutputsWorker struct {
func (w assembleOutputsWorker) Process(ctx context.Context, workItem workqueue.WorkItem) (workqueue.WorkStatus, error) {
i := workItem.(*outputAssembleItem)
+ // #nosec G115
outputReaders, err := ConstructOutputReaders(ctx, i.dataStore, i.outputPaths.GetOutputPrefixPath(), i.outputPaths.GetRawOutputPrefix(), int(i.finalPhases.ItemsCount))
if err != nil {
logger.Warnf(ctx, "Failed to construct output readers. Error: %v", err)
@@ -89,7 +90,7 @@ func (w assembleOutputsWorker) Process(ctx context.Context, workItem workqueue.W
// to aggregate outputs here
finalOutputs.Literals = output.GetLiterals()
} else {
- appendSubTaskOutput(finalOutputs, output, int64(i.finalPhases.ItemsCount))
+ appendSubTaskOutput(finalOutputs, output, int64(i.finalPhases.ItemsCount)) // #nosec G115
continue
}
}
@@ -110,7 +111,7 @@ func (w assembleOutputsWorker) Process(ctx context.Context, workItem workqueue.W
}
func appendOneItem(outputs *core.LiteralMap, varName string, literal *core.Literal, expectedSize int64) {
- existingVal, found := outputs.Literals[varName]
+ existingVal, found := outputs.GetLiterals()[varName]
var list *core.LiteralCollection
if found {
list = existingVal.GetCollection()
@@ -155,7 +156,7 @@ func buildFinalPhases(executedTasks bitarray.CompactArray, indexes *bitarray.Bit
// Set phases of already discovered tasks to success
for i := uint(0); i < totalSize; i++ {
if !indexes.IsSet(i) {
- res.SetItem(int(i), bitarray.Item(pluginCore.PhaseSuccess))
+ res.SetItem(int(i), bitarray.Item(pluginCore.PhaseSuccess)) // #nosec G115
}
}
@@ -199,14 +200,14 @@ func AssembleFinalOutputs(ctx context.Context, assemblyQueue OutputAssembler, tC
}
finalPhases := buildFinalPhases(state.GetArrayStatus().Detailed,
- state.GetIndexesToCache(), uint(state.GetOriginalArraySize()))
+ state.GetIndexesToCache(), uint(state.GetOriginalArraySize())) // #nosec G115
err = assemblyQueue.Queue(ctx, workItemID, &outputAssembleItem{
varNames: varNames,
finalPhases: finalPhases,
outputPaths: tCtx.OutputWriter(),
dataStore: tCtx.DataStore(),
- isAwsSingleJob: taskTemplate.Type == AwsBatchTaskType,
+ isAwsSingleJob: taskTemplate.GetType() == AwsBatchTaskType,
})
if err != nil {
@@ -274,6 +275,7 @@ type assembleErrorsWorker struct {
func (a assembleErrorsWorker) Process(ctx context.Context, workItem workqueue.WorkItem) (workqueue.WorkStatus, error) {
w := workItem.(*outputAssembleItem)
+ // #nosec G115
outputReaders, err := ConstructOutputReaders(ctx, w.dataStore, w.outputPaths.GetOutputPrefixPath(), w.outputPaths.GetRawOutputPrefix(), int(w.finalPhases.ItemsCount))
if err != nil {
return workqueue.WorkStatusNotDone, err
diff --git a/flyteplugins/go/tasks/plugins/hive/execution_state.go b/flyteplugins/go/tasks/plugins/hive/execution_state.go
index 16ac3835bd..b1d971d0d3 100644
--- a/flyteplugins/go/tasks/plugins/hive/execution_state.go
+++ b/flyteplugins/go/tasks/plugins/hive/execution_state.go
@@ -116,7 +116,7 @@ func MapExecutionStateToPhaseInfo(state ExecutionState, _ client.QuboleClient) c
if state.CreationFailureCount > 5 {
phaseInfo = core.PhaseInfoSystemRetryableFailure("QuboleFailure", "Too many creation attempts", nil)
} else {
- phaseInfo = core.PhaseInfoQueued(t, uint32(state.CreationFailureCount), "Waiting for Qubole launch")
+ phaseInfo = core.PhaseInfoQueued(t, uint32(state.CreationFailureCount), "Waiting for Qubole launch") // #nosec G115
}
case PhaseSubmitted:
phaseInfo = core.PhaseInfoRunning(core.DefaultPhaseVersion, ConstructTaskInfo(state))
@@ -240,7 +240,7 @@ func GetAllocationToken(ctx context.Context, tCtx core.TaskExecutionContext, cur
}
func validateQuboleHiveJob(hiveJob plugins.QuboleHiveJob) error {
- if hiveJob.Query == nil {
+ if hiveJob.GetQuery() == nil {
return errors.Errorf(errors.BadTaskSpecification,
"Query could not be found. Please ensure that you are at least on Flytekit version 0.3.0 or later.")
}
@@ -267,7 +267,7 @@ func GetQueryInfo(ctx context.Context, tCtx core.TaskExecutionContext) (
return "", "", []string{}, 0, "", err
}
- query := hiveJob.Query.GetQuery()
+ query := hiveJob.GetQuery().GetQuery()
outputs, err := template.Render(ctx, []string{query},
template.Parameters{
@@ -281,10 +281,10 @@ func GetQueryInfo(ctx context.Context, tCtx core.TaskExecutionContext) (
}
formattedQuery = outputs[0]
- cluster = hiveJob.ClusterLabel
- timeoutSec = hiveJob.Query.TimeoutSec
- taskName = taskTemplate.Id.Name
- tags = hiveJob.Tags
+ cluster = hiveJob.GetClusterLabel()
+ timeoutSec = hiveJob.GetQuery().GetTimeoutSec()
+ taskName = taskTemplate.GetId().GetName()
+ tags = hiveJob.GetTags()
tags = append(tags, fmt.Sprintf("ns:%s", tCtx.TaskExecutionMetadata().GetNamespace()))
for k, v := range tCtx.TaskExecutionMetadata().GetLabels() {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
@@ -326,8 +326,8 @@ func mapLabelToPrimaryLabel(ctx context.Context, quboleCfg *config.Config, label
func mapProjectDomainToDestinationClusterLabel(ctx context.Context, tCtx core.TaskExecutionContext, quboleCfg *config.Config) (string, bool) {
tExecID := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID()
- project := tExecID.NodeExecutionId.GetExecutionId().GetProject()
- domain := tExecID.NodeExecutionId.GetExecutionId().GetDomain()
+ project := tExecID.GetNodeExecutionId().GetExecutionId().GetProject()
+ domain := tExecID.GetNodeExecutionId().GetExecutionId().GetDomain()
logger.Debugf(ctx, "No clusterLabelOverride. Finding the pre-defined cluster label for (project: %v, domain: %v)", project, domain)
// Using a linear search because N is small
for _, m := range quboleCfg.DestinationClusterConfigs {
@@ -504,7 +504,7 @@ func WriteOutputs(ctx context.Context, tCtx core.TaskExecutionContext, currentSt
}
externalLocation := tCtx.OutputWriter().GetRawOutputPrefix()
- outputs := taskTemplate.Interface.Outputs.GetVariables()
+ outputs := taskTemplate.GetInterface().GetOutputs().GetVariables()
if len(outputs) != 0 && len(outputs) != 1 {
return currentState, errors.Errorf(errors.BadTaskSpecification, "Hive tasks must have zero or one output: [%d] found", len(outputs))
}
diff --git a/flyteplugins/go/tasks/plugins/hive/execution_state_test.go b/flyteplugins/go/tasks/plugins/hive/execution_state_test.go
index 4e34a04593..d67a53bb10 100644
--- a/flyteplugins/go/tasks/plugins/hive/execution_state_test.go
+++ b/flyteplugins/go/tasks/plugins/hive/execution_state_test.go
@@ -101,7 +101,7 @@ func TestConstructTaskLog(t *testing.T) {
u, err := url.Parse(expected)
assert.NoError(t, err)
taskLog := ConstructTaskLog(ExecutionState{CommandID: "123", URI: u.String()})
- assert.Equal(t, expected, taskLog.Uri)
+ assert.Equal(t, expected, taskLog.GetUri())
}
func TestConstructTaskInfo(t *testing.T) {
@@ -120,7 +120,7 @@ func TestConstructTaskInfo(t *testing.T) {
}
taskInfo := ConstructTaskInfo(e)
- assert.Equal(t, "https://wellness.qubole.com/v2/analyze?command_id=123", taskInfo.Logs[0].Uri)
+ assert.Equal(t, "https://wellness.qubole.com/v2/analyze?command_id=123", taskInfo.Logs[0].GetUri())
assert.Len(t, taskInfo.ExternalResources, 1)
assert.Equal(t, taskInfo.ExternalResources[0].ExternalID, "123")
}
@@ -358,7 +358,7 @@ func TestWriteOutputs(t *testing.T) {
literals, err1, err2 := reader.Read(context.Background())
assert.Nil(t, err1)
assert.NoError(t, err2)
- assert.NotNil(t, literals.Literals["results"].GetScalar().GetSchema())
+ assert.NotNil(t, literals.GetLiterals()["results"].GetScalar().GetSchema())
})
state := ExecutionState{}
diff --git a/flyteplugins/go/tasks/plugins/hive/executions_cache.go b/flyteplugins/go/tasks/plugins/hive/executions_cache.go
index 40885ab093..6ce2fcf6e2 100644
--- a/flyteplugins/go/tasks/plugins/hive/executions_cache.go
+++ b/flyteplugins/go/tasks/plugins/hive/executions_cache.go
@@ -39,7 +39,8 @@ func NewQuboleHiveExecutionsCache(ctx context.Context, quboleClient client.Qubol
scope: scope,
cfg: cfg,
}
- autoRefreshCache, err := cache.NewAutoRefreshCache("qubole", q.SyncQuboleQuery, workqueue.DefaultControllerRateLimiter(), ResyncDuration, cfg.Workers, cfg.LruCacheSize, scope)
+ // #nosec G115
+ autoRefreshCache, err := cache.NewAutoRefreshCache("qubole", q.SyncQuboleQuery, workqueue.DefaultControllerRateLimiter(), ResyncDuration, uint(cfg.Workers), uint(cfg.LruCacheSize), scope)
if err != nil {
logger.Errorf(ctx, "Could not create AutoRefreshCache in QuboleHiveExecutor. [%s]", err)
return q, errors.Wrapf(errors.CacheFailed, err, "Error creating AutoRefreshCache")
diff --git a/flyteplugins/go/tasks/plugins/k8s/dask/dask.go b/flyteplugins/go/tasks/plugins/k8s/dask/dask.go
index ae68a4c760..df0e4f3472 100644
--- a/flyteplugins/go/tasks/plugins/k8s/dask/dask.go
+++ b/flyteplugins/go/tasks/plugins/k8s/dask/dask.go
@@ -20,7 +20,7 @@ import (
"github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s"
"github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/k8s"
"github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/tasklog"
- "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/utils"
+ "github.com/flyteorg/flyte/flytestdlib/utils"
)
const (
@@ -66,7 +66,7 @@ func (p daskResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC
}
daskJob := plugins.DaskJob{}
- err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &daskJob)
+ err = utils.UnmarshalStructToPb(taskTemplate.GetCustom(), &daskJob)
if err != nil {
return nil, errors.Wrapf(errors.BadTaskSpecification, err, "invalid TaskSpecification [%v], failed to unmarshal", taskTemplate.GetCustom())
}
@@ -85,13 +85,13 @@ func (p daskResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC
mergeMapInto(taskCtx.TaskExecutionMetadata().GetAnnotations(), objectMeta.Annotations)
mergeMapInto(taskCtx.TaskExecutionMetadata().GetLabels(), objectMeta.Labels)
- workerSpec, err := createWorkerSpec(*daskJob.Workers, podSpec, primaryContainerName)
+ workerSpec, err := createWorkerSpec(daskJob.GetWorkers(), podSpec, primaryContainerName)
if err != nil {
return nil, err
}
clusterName := taskCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName()
- schedulerSpec, err := createSchedulerSpec(*daskJob.Scheduler, clusterName, nonInterruptiblePodSpec, primaryContainerName)
+ schedulerSpec, err := createSchedulerSpec(daskJob.GetScheduler(), clusterName, nonInterruptiblePodSpec, primaryContainerName)
if err != nil {
return nil, err
}
@@ -112,7 +112,7 @@ func (p daskResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC
return job, nil
}
-func createWorkerSpec(cluster plugins.DaskWorkerGroup, podSpec *v1.PodSpec, primaryContainerName string) (*daskAPI.WorkerSpec, error) {
+func createWorkerSpec(cluster *plugins.DaskWorkerGroup, podSpec *v1.PodSpec, primaryContainerName string) (*daskAPI.WorkerSpec, error) {
workerPodSpec := podSpec.DeepCopy()
primaryContainer, err := flytek8s.GetContainer(workerPodSpec, primaryContainerName)
if err != nil {
@@ -128,7 +128,7 @@ func createWorkerSpec(cluster plugins.DaskWorkerGroup, podSpec *v1.PodSpec, prim
// Set custom resources
resources := &primaryContainer.Resources
clusterResources := cluster.GetResources()
- if len(clusterResources.Requests) >= 1 || len(clusterResources.Limits) >= 1 {
+ if len(clusterResources.GetRequests()) >= 1 || len(clusterResources.GetLimits()) >= 1 {
resources, err = flytek8s.ToK8sResourceRequirements(cluster.GetResources())
if err != nil {
return nil, err
@@ -174,7 +174,7 @@ func createWorkerSpec(cluster plugins.DaskWorkerGroup, podSpec *v1.PodSpec, prim
}, nil
}
-func createSchedulerSpec(scheduler plugins.DaskScheduler, clusterName string, podSpec *v1.PodSpec, primaryContainerName string) (*daskAPI.SchedulerSpec, error) {
+func createSchedulerSpec(scheduler *plugins.DaskScheduler, clusterName string, podSpec *v1.PodSpec, primaryContainerName string) (*daskAPI.SchedulerSpec, error) {
schedulerPodSpec := podSpec.DeepCopy()
primaryContainer, err := flytek8s.GetContainer(schedulerPodSpec, primaryContainerName)
if err != nil {
@@ -190,7 +190,7 @@ func createSchedulerSpec(scheduler plugins.DaskScheduler, clusterName string, po
// Override resources if applicable
resources := &primaryContainer.Resources
schedulerResources := scheduler.GetResources()
- if len(schedulerResources.Requests) >= 1 || len(schedulerResources.Limits) >= 1 {
+ if len(schedulerResources.GetRequests()) >= 1 || len(schedulerResources.GetLimits()) >= 1 {
resources, err = flytek8s.ToK8sResourceRequirements(scheduler.GetResources())
if err != nil {
return nil, err
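
`createWorkerSpec` and `createSchedulerSpec` now accept `*plugins.DaskWorkerGroup` and `*plugins.DaskScheduler` pointers, matching what the generated `GetWorkers()`/`GetScheduler()` accessors return and removing the value copy the old `*daskJob.Workers` dereference forced on the caller (generated protobuf messages are generally meant to be passed by pointer; copying them by value can be flagged by `go vet`'s copylocks check). A sketch of the signature shape, using an illustrative stand-in type rather than the real plugin API:

```go
package main

import "fmt"

// DaskWorkerGroup stands in for the generated plugins.DaskWorkerGroup message.
type DaskWorkerGroup struct {
	NumberOfWorkers int32
	Image           string
}

// Nil-safe accessors in the style protoc-gen-go emits.
func (w *DaskWorkerGroup) GetNumberOfWorkers() int32 {
	if w == nil {
		return 0
	}
	return w.NumberOfWorkers
}

func (w *DaskWorkerGroup) GetImage() string {
	if w == nil {
		return ""
	}
	return w.Image
}

// Before: a value parameter forced callers to dereference and copy the message.
// After: a pointer parameter lets callers pass GetWorkers() straight through.
func describeWorkers(w *DaskWorkerGroup) string {
	return fmt.Sprintf("%d workers using image %q", w.GetNumberOfWorkers(), w.GetImage())
}

func main() {
	job := &DaskWorkerGroup{NumberOfWorkers: 4, Image: "dask:latest"}
	fmt.Println(describeWorkers(job))
	fmt.Println(describeWorkers(nil)) // still safe thanks to the nil-safe getters
}
```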
diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go
index d0e154835c..9196c788cc 100644
--- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go
+++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go
@@ -178,7 +178,7 @@ func TestGetLogs(t *testing.T) {
jobLogs, err := GetLogs(taskCtx, MPITaskType, mpiJobObjectMeta, false, workers, launcher, 0, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(jobLogs))
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", "mpi-namespace", "test"), jobLogs[0].Uri)
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", "mpi-namespace", "test"), jobLogs[0].GetUri())
pytorchJobObjectMeta := meta_v1.ObjectMeta{
Name: "test",
@@ -187,8 +187,8 @@ func TestGetLogs(t *testing.T) {
jobLogs, err = GetLogs(taskCtx, PytorchTaskType, pytorchJobObjectMeta, true, workers, launcher, 0, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(jobLogs))
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[0].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[1].Uri)
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[0].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[1].GetUri())
tensorflowJobObjectMeta := meta_v1.ObjectMeta{
Name: "test",
@@ -197,9 +197,9 @@ func TestGetLogs(t *testing.T) {
jobLogs, err = GetLogs(taskCtx, TensorflowTaskType, tensorflowJobObjectMeta, false, workers, launcher, 1, 0)
assert.NoError(t, err)
assert.Equal(t, 3, len(jobLogs))
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[0].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[1].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[2].Uri)
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[0].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[1].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[2].GetUri())
}
@@ -221,8 +221,8 @@ func TestGetLogsTemplateUri(t *testing.T) {
jobLogs, err := GetLogs(taskCtx, PytorchTaskType, pytorchJobObjectMeta, true, 1, 0, 0, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(jobLogs))
- assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-master-0×tamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[0].Uri)
- assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-worker-0×tamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[1].Uri)
+ assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-master-0×tamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[0].GetUri())
+ assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-worker-0×tamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[1].GetUri())
}
func dummyPodSpec() v1.PodSpec {
diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go
index 53e4d30ccb..7ba2c0cb86 100644
--- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go
+++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go
@@ -60,7 +60,7 @@ func (mpiOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx plu
var launcherReplicaSpec, workerReplicaSpec *commonOp.ReplicaSpec
- if taskTemplate.TaskTypeVersion == 0 {
+ if taskTemplate.GetTaskTypeVersion() == 0 {
mpiTaskExtraArgs := plugins.DistributedMPITrainingTask{}
err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &mpiTaskExtraArgs)
if err != nil {
@@ -98,7 +98,7 @@ func (mpiOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx plu
}
}
- } else if taskTemplate.TaskTypeVersion == 1 {
+ } else if taskTemplate.GetTaskTypeVersion() == 1 {
kfMPITaskExtraArgs := kfplugins.DistributedMPITrainingTask{}
err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfMPITaskExtraArgs)
@@ -122,7 +122,7 @@ func (mpiOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx plu
} else {
return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification,
- "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion)
+ "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.GetTaskTypeVersion())
}
if *workerReplicaSpec.Replicas <= 0 {
diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go
index 6c0080d45a..2bc668f2c6 100644
--- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go
+++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go
@@ -569,8 +569,8 @@ func TestGetLogs(t *testing.T) {
jobLogs, err := common.GetLogs(taskCtx, common.MPITaskType, mpiJob.ObjectMeta, false, workers, launcher, 0, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(jobLogs))
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[0].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[1].Uri)
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[0].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[1].GetUri())
}
func TestGetProperties(t *testing.T) {
diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go
index 6d7c80a7fd..0ee3f3570f 100644
--- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go
+++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go
@@ -61,7 +61,7 @@ func (pytorchOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx
var masterReplicaSpec, workerReplicaSpec *commonOp.ReplicaSpec
- if taskTemplate.TaskTypeVersion == 0 {
+ if taskTemplate.GetTaskTypeVersion() == 0 {
pytorchTaskExtraArgs := plugins.DistributedPyTorchTrainingTask{}
err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &pytorchTaskExtraArgs)
@@ -85,7 +85,7 @@ func (pytorchOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx
if elasticConfig != nil {
elasticPolicy = ParseElasticConfig(elasticConfig)
}
- } else if taskTemplate.TaskTypeVersion == 1 {
+ } else if taskTemplate.GetTaskTypeVersion() == 1 {
kfPytorchTaskExtraArgs := kfplugins.DistributedPyTorchTrainingTask{}
err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfPytorchTaskExtraArgs)
@@ -132,7 +132,7 @@ func (pytorchOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx
}
} else {
return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification,
- "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion)
+ "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.GetTaskTypeVersion())
}
if *workerReplicaSpec.Replicas <= 0 {
diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go
index 814b340fe6..741a8a00c8 100644
--- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go
+++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go
@@ -688,9 +688,9 @@ func TestGetLogs(t *testing.T) {
jobLogs, err := common.GetLogs(taskCtx, common.PytorchTaskType, pytorchJob.ObjectMeta, hasMaster, workers, 0, 0, 0)
assert.NoError(t, err)
assert.Equal(t, 3, len(jobLogs))
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[2].Uri)
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[2].GetUri())
}
func TestGetLogsElastic(t *testing.T) {
@@ -708,8 +708,8 @@ func TestGetLogsElastic(t *testing.T) {
jobLogs, err := common.GetLogs(taskCtx, common.PytorchTaskType, pytorchJob.ObjectMeta, hasMaster, workers, 0, 0, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(jobLogs))
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].Uri)
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].GetUri())
}
func TestGetProperties(t *testing.T) {
diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go
index 93b4d91cd2..3c0a3e9485 100644
--- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go
+++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go
@@ -55,7 +55,7 @@ func (tensorflowOperatorResourceHandler) BuildResource(ctx context.Context, task
replicaSpecMap := make(map[commonOp.ReplicaType]*commonOp.ReplicaSpec)
runPolicy := commonOp.RunPolicy{}
- if taskTemplate.TaskTypeVersion == 0 {
+ if taskTemplate.GetTaskTypeVersion() == 0 {
tensorflowTaskExtraArgs := plugins.DistributedTensorflowTrainingTask{}
err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &tensorflowTaskExtraArgs)
@@ -83,7 +83,7 @@ func (tensorflowOperatorResourceHandler) BuildResource(ctx context.Context, task
}
}
- } else if taskTemplate.TaskTypeVersion == 1 {
+ } else if taskTemplate.GetTaskTypeVersion() == 1 {
kfTensorflowTaskExtraArgs := kfplugins.DistributedTensorflowTrainingTask{}
err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfTensorflowTaskExtraArgs)
@@ -125,7 +125,7 @@ func (tensorflowOperatorResourceHandler) BuildResource(ctx context.Context, task
} else {
return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification,
- "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion)
+ "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.GetTaskTypeVersion())
}
if v, ok := replicaSpecMap[kubeflowv1.TFJobReplicaTypeWorker]; !ok || *v.Replicas <= 0 {
diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go
index d4d6e6da17..22b750c22b 100644
--- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go
+++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go
@@ -628,11 +628,11 @@ func TestGetLogs(t *testing.T) {
workers, psReplicas, chiefReplicas, evaluatorReplicas)
assert.NoError(t, err)
assert.Equal(t, 5, len(jobLogs))
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[0].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[1].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[2].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[3].Uri)
- assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-evaluatorReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[4].Uri)
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[0].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[1].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[2].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[3].GetUri())
+ assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-evaluatorReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[4].GetUri())
}
func TestGetProperties(t *testing.T) {
diff --git a/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go b/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go
index 2a08cd0e6c..60b0d5c8d5 100644
--- a/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go
+++ b/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go
@@ -59,7 +59,7 @@ func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecu
}
primaryContainerName := ""
- if taskTemplate.Type == SidecarTaskType && taskTemplate.TaskTypeVersion == 0 {
+ if taskTemplate.GetType() == SidecarTaskType && taskTemplate.GetTaskTypeVersion() == 0 {
// handles pod tasks when they are defined as Sidecar tasks and marshal the podspec using k8s proto.
sidecarJob := sidecarJob{}
err := utils.UnmarshalStructToObj(taskTemplate.GetCustom(), &sidecarJob)
@@ -79,7 +79,7 @@ func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecu
// update annotations and labels
objectMeta.Annotations = utils.UnionMaps(objectMeta.Annotations, sidecarJob.Annotations)
objectMeta.Labels = utils.UnionMaps(objectMeta.Labels, sidecarJob.Labels)
- } else if taskTemplate.Type == SidecarTaskType && taskTemplate.TaskTypeVersion == 1 {
+ } else if taskTemplate.GetType() == SidecarTaskType && taskTemplate.GetTaskTypeVersion() == 1 {
// handles pod tasks that marshal the pod spec to the task custom.
err := utils.UnmarshalStructToObj(taskTemplate.GetCustom(), &podSpec)
if err != nil {
@@ -100,9 +100,9 @@ func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecu
}
// update annotations and labels
- if taskTemplate.GetK8SPod() != nil && taskTemplate.GetK8SPod().Metadata != nil {
- objectMeta.Annotations = utils.UnionMaps(objectMeta.Annotations, taskTemplate.GetK8SPod().Metadata.Annotations)
- objectMeta.Labels = utils.UnionMaps(objectMeta.Labels, taskTemplate.GetK8SPod().Metadata.Labels)
+ if taskTemplate.GetK8SPod() != nil && taskTemplate.GetK8SPod().GetMetadata() != nil {
+ objectMeta.Annotations = utils.UnionMaps(objectMeta.Annotations, taskTemplate.GetK8SPod().GetMetadata().GetAnnotations())
+ objectMeta.Labels = utils.UnionMaps(objectMeta.Labels, taskTemplate.GetK8SPod().GetMetadata().GetLabels())
}
} else {
// handles both container / pod tasks that use the TaskTemplate Container and K8sPod fields
@@ -122,7 +122,7 @@ func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecu
// set primaryContainerKey annotation if this is a Sidecar task or, as an optimization, if there is only a single
// container. this plugin marks the task complete if the primary Container is complete, so if there is only one
// container we can mark the task as complete before the Pod has been marked complete.
- if taskTemplate.Type == SidecarTaskType || len(podSpec.Containers) == 1 {
+ if taskTemplate.GetType() == SidecarTaskType || len(podSpec.Containers) == 1 {
objectMeta.Annotations[flytek8s.PrimaryContainerKey] = primaryContainerName
}
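
The getter rewrites in this plugin rely on a property of protoc-gen-go output: every generated getter checks for a nil receiver and returns the zero value, so a chain such as taskTemplate.GetK8SPod().GetMetadata().GetAnnotations() cannot panic even when intermediate messages are unset, and the converted nil checks stay equivalent to the originals. A minimal sketch of that behavior, using hand-written stand-ins for the generated flyteidl types:

package main

import "fmt"

// Metadata and K8sPod mimic protoc-gen-go output; the real types live in
// flyteidl. The nil-receiver guard in each getter is exactly what the
// generated code contains.
type Metadata struct {
    Annotations map[string]string
}

func (m *Metadata) GetAnnotations() map[string]string {
    if m == nil {
        return nil
    }
    return m.Annotations
}

type K8sPod struct {
    Metadata *Metadata
}

func (p *K8sPod) GetMetadata() *Metadata {
    if p == nil {
        return nil
    }
    return p.Metadata
}

func main() {
    var pod *K8sPod // unset message
    // Safe: each getter tolerates a nil receiver, so the chain yields nil
    // instead of panicking the way pod.Metadata.Annotations would.
    fmt.Println(pod.GetMetadata().GetAnnotations() == nil) // true
}
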
diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go
index cf55e29d07..c1e5dd264d 100644
--- a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go
+++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go
@@ -94,8 +94,8 @@ func (rayJobResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsC
cfg := GetConfig()
headNodeRayStartParams := make(map[string]string)
- if rayJob.RayCluster.HeadGroupSpec != nil && rayJob.RayCluster.HeadGroupSpec.RayStartParams != nil {
- headNodeRayStartParams = rayJob.RayCluster.HeadGroupSpec.RayStartParams
+ if rayJob.GetRayCluster().GetHeadGroupSpec() != nil && rayJob.GetRayCluster().GetHeadGroupSpec().GetRayStartParams() != nil {
+ headNodeRayStartParams = rayJob.GetRayCluster().GetHeadGroupSpec().GetRayStartParams()
} else if headNode := cfg.Defaults.HeadNode; len(headNode.StartParameters) > 0 {
headNodeRayStartParams = headNode.StartParameters
}
@@ -133,7 +133,7 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob *plugins.R
headPodSpec,
objectMeta,
taskCtx,
- rayJob.RayCluster.HeadGroupSpec,
+ rayJob.GetRayCluster().GetHeadGroupSpec(),
)
if err != nil {
return nil, err
@@ -150,7 +150,7 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob *plugins.R
EnableInTreeAutoscaling: &rayJob.RayCluster.EnableAutoscaling,
}
- for _, spec := range rayJob.RayCluster.WorkerGroupSpec {
+ for _, spec := range rayJob.GetRayCluster().GetWorkerGroupSpec() {
workerPodSpec := taskPodSpec.DeepCopy()
workerPodTemplate, err := buildWorkerPodTemplate(
&workerPodSpec.Containers[primaryContainerIdx],
@@ -165,7 +165,7 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob *plugins.R
workerNodeRayStartParams := make(map[string]string)
if spec.RayStartParams != nil {
- workerNodeRayStartParams = spec.RayStartParams
+ workerNodeRayStartParams = spec.GetRayStartParams()
} else if workerNode := cfg.Defaults.WorkerNode; len(workerNode.StartParameters) > 0 {
workerNodeRayStartParams = workerNode.StartParameters
}
@@ -178,17 +178,17 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob *plugins.R
workerNodeRayStartParams[DisableUsageStatsStartParameter] = DisableUsageStatsStartParameterVal
}
- minReplicas := spec.MinReplicas
- if minReplicas > spec.Replicas {
- minReplicas = spec.Replicas
+ minReplicas := spec.GetMinReplicas()
+ if minReplicas > spec.GetReplicas() {
+ minReplicas = spec.GetReplicas()
}
- maxReplicas := spec.MaxReplicas
- if maxReplicas < spec.Replicas {
- maxReplicas = spec.Replicas
+ maxReplicas := spec.GetMaxReplicas()
+ if maxReplicas < spec.GetReplicas() {
+ maxReplicas = spec.GetReplicas()
}
workerNodeSpec := rayv1.WorkerGroupSpec{
- GroupName: spec.GroupName,
+ GroupName: spec.GetGroupName(),
MinReplicas: &minReplicas,
MaxReplicas: &maxReplicas,
Replicas: &spec.Replicas,
@@ -211,7 +211,7 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob *plugins.R
shutdownAfterJobFinishes := cfg.ShutdownAfterJobFinishes
ttlSecondsAfterFinished := &cfg.TTLSecondsAfterFinished
- if rayJob.ShutdownAfterJobFinishes {
+ if rayJob.GetShutdownAfterJobFinishes() {
shutdownAfterJobFinishes = true
ttlSecondsAfterFinished = &rayJob.TtlSecondsAfterFinished
}
@@ -221,10 +221,10 @@ func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob *plugins.R
// TODO: This is for backward compatibility. Remove this block once runtime_env is removed from ray proto.
var runtimeEnvYaml string
- runtimeEnvYaml = rayJob.RuntimeEnvYaml
+ runtimeEnvYaml = rayJob.GetRuntimeEnvYaml()
// If runtime_env exists but runtime_env_yaml does not, convert runtime_env to runtime_env_yaml
- if rayJob.RuntimeEnv != "" && rayJob.RuntimeEnvYaml == "" {
- runtimeEnvYaml, err = convertBase64RuntimeEnvToYaml(rayJob.RuntimeEnv)
+ if rayJob.GetRuntimeEnv() != "" && rayJob.GetRuntimeEnvYaml() == "" {
+ runtimeEnvYaml, err = convertBase64RuntimeEnvToYaml(rayJob.GetRuntimeEnv())
if err != nil {
return nil, err
}
@@ -369,7 +369,7 @@ func buildHeadPodTemplate(primaryContainer *v1.Container, basePodSpec *v1.PodSpe
// Inject a sidecar for capturing and exposing Ray job logs
injectLogsSidecar(primaryContainer, basePodSpec)
- basePodSpec, err := mergeCustomPodSpec(primaryContainer, basePodSpec, spec.K8SPod)
+ basePodSpec, err := mergeCustomPodSpec(primaryContainer, basePodSpec, spec.GetK8SPod())
if err != nil {
return v1.PodTemplateSpec{}, err
}
@@ -497,7 +497,7 @@ func buildWorkerPodTemplate(primaryContainer *v1.Container, basePodSpec *v1.PodS
}
primaryContainer.Ports = append(primaryContainer.Ports, ports...)
- basePodSpec, err := mergeCustomPodSpec(primaryContainer, basePodSpec, spec.K8SPod)
+ basePodSpec, err := mergeCustomPodSpec(primaryContainer, basePodSpec, spec.GetK8SPod())
if err != nil {
return v1.PodTemplateSpec{}, err
}
@@ -517,16 +517,16 @@ func mergeCustomPodSpec(primaryContainer *v1.Container, podSpec *v1.PodSpec, k8s
return podSpec, nil
}
- if k8sPod.PodSpec == nil {
+ if k8sPod.GetPodSpec() == nil {
return podSpec, nil
}
var customPodSpec *v1.PodSpec
- err := utils.UnmarshalStructToObj(k8sPod.PodSpec, &customPodSpec)
+ err := utils.UnmarshalStructToObj(k8sPod.GetPodSpec(), &customPodSpec)
if err != nil {
return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification,
- "Unable to unmarshal pod spec [%v], Err: [%v]", k8sPod.PodSpec, err.Error())
+ "Unable to unmarshal pod spec [%v], Err: [%v]", k8sPod.GetPodSpec(), err.Error())
}
for _, container := range customPodSpec.Containers {
diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go
index 2cd3eb8893..708939485b 100644
--- a/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go
+++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go
@@ -500,7 +500,7 @@ func TestBuildResourceRayCustomK8SPod(t *testing.T) {
}
if p.workerK8SPod != nil {
- for _, spec := range rayJobInput.RayCluster.WorkerGroupSpec {
+ for _, spec := range rayJobInput.GetRayCluster().GetWorkerGroupSpec() {
spec.K8SPod = p.workerK8SPod
}
}
diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/spark.go b/flyteplugins/go/tasks/plugins/k8s/spark/spark.go
index 8b766a391a..6873fc2257 100644
--- a/flyteplugins/go/tasks/plugins/k8s/spark/spark.go
+++ b/flyteplugins/go/tasks/plugins/k8s/spark/spark.go
@@ -44,7 +44,7 @@ func validateSparkJob(sparkJob *plugins.SparkJob) error {
return fmt.Errorf("empty sparkJob")
}
- if len(sparkJob.MainApplicationFile) == 0 && len(sparkJob.MainClass) == 0 {
+ if len(sparkJob.GetMainApplicationFile()) == 0 && len(sparkJob.GetMainClass()) == 0 {
return fmt.Errorf("either MainApplicationFile or MainClass must be set")
}
@@ -262,10 +262,10 @@ func createSparkApplication(sparkJob *plugins.SparkJob, sparkConfig map[string]s
app.Spec.BatchScheduler = &val
}
- if sparkJob.MainApplicationFile != "" {
+ if sparkJob.GetMainApplicationFile() != "" {
app.Spec.MainApplicationFile = &sparkJob.MainApplicationFile
}
- if sparkJob.MainClass != "" {
+ if sparkJob.GetMainClass() != "" {
app.Spec.MainClass = &sparkJob.MainClass
}
return app
diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go b/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go
index 7ea6c42be2..0a6f51d0e2 100644
--- a/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go
+++ b/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go
@@ -101,10 +101,10 @@ func TestGetEventInfo(t *testing.T) {
info, err := getEventInfoForSpark(taskCtx, dummySparkApplication(sj.RunningState))
assert.NoError(t, err)
assert.Len(t, info.Logs, 6)
- assert.Equal(t, "https://spark-ui.flyte", info.CustomInfo.Fields[sparkDriverUI].GetStringValue())
+ assert.Equal(t, "https://spark-ui.flyte", info.CustomInfo.GetFields()[sparkDriverUI].GetStringValue())
generatedLinks := make([]string, 0, len(info.Logs))
for _, l := range info.Logs {
- generatedLinks = append(generatedLinks, l.Uri)
+ generatedLinks = append(generatedLinks, l.GetUri())
}
expectedLinks := []string{
@@ -121,12 +121,12 @@ func TestGetEventInfo(t *testing.T) {
info, err = getEventInfoForSpark(taskCtx, dummySparkApplication(sj.SubmittedState))
generatedLinks = make([]string, 0, len(info.Logs))
for _, l := range info.Logs {
- generatedLinks = append(generatedLinks, l.Uri)
+ generatedLinks = append(generatedLinks, l.GetUri())
}
assert.NoError(t, err)
assert.Len(t, info.Logs, 5)
assert.Equal(t, expectedLinks[:5], generatedLinks) // No Spark Driver UI for Submitted state
- assert.True(t, info.Logs[4].ShowWhilePending) // All User Logs should be shown while pending
+ assert.True(t, info.Logs[4].GetShowWhilePending()) // All User Logs should be shown while pending
assert.NoError(t, setSparkConfig(&Config{
SparkHistoryServerURL: "spark-history.flyte",
@@ -151,10 +151,10 @@ func TestGetEventInfo(t *testing.T) {
info, err = getEventInfoForSpark(taskCtx, dummySparkApplication(sj.FailedState))
assert.NoError(t, err)
assert.Len(t, info.Logs, 5)
- assert.Equal(t, "spark-history.flyte/history/app-id", info.CustomInfo.Fields[sparkHistoryUI].GetStringValue())
+ assert.Equal(t, "spark-history.flyte/history/app-id", info.CustomInfo.GetFields()[sparkHistoryUI].GetStringValue())
generatedLinks = make([]string, 0, len(info.Logs))
for _, l := range info.Logs {
- generatedLinks = append(generatedLinks, l.Uri)
+ generatedLinks = append(generatedLinks, l.GetUri())
}
expectedLinks = []string{
diff --git a/flyteplugins/go/tasks/plugins/presto/execution_state.go b/flyteplugins/go/tasks/plugins/presto/execution_state.go
index 3399c013ae..88edb30cb8 100644
--- a/flyteplugins/go/tasks/plugins/presto/execution_state.go
+++ b/flyteplugins/go/tasks/plugins/presto/execution_state.go
@@ -217,10 +217,10 @@ func GetQueryInfo(ctx context.Context, tCtx core.TaskExecutionContext) (string,
}
outputs, err := template.Render(ctx, []string{
- prestoQuery.RoutingGroup,
- prestoQuery.Catalog,
- prestoQuery.Schema,
- prestoQuery.Statement,
+ prestoQuery.GetRoutingGroup(),
+ prestoQuery.GetCatalog(),
+ prestoQuery.GetSchema(),
+ prestoQuery.GetStatement(),
}, template.Parameters{
TaskExecMetadata: tCtx.TaskExecutionMetadata(),
Inputs: tCtx.InputReader(),
@@ -241,7 +241,7 @@ func GetQueryInfo(ctx context.Context, tCtx core.TaskExecutionContext) (string,
}
func validatePrestoStatement(prestoJob plugins.PrestoQuery) error {
- if prestoJob.Statement == "" {
+ if prestoJob.GetStatement() == "" {
return errors.Errorf(errors.BadTaskSpecification,
"Query could not be found. Please ensure that you are at least on Flytekit version 0.3.0 or later.")
}
@@ -440,7 +440,7 @@ func writeOutput(ctx context.Context, tCtx core.TaskExecutionContext, externalLo
return err
}
- results := taskTemplate.Interface.Outputs.Variables["results"]
+ results := taskTemplate.GetInterface().GetOutputs().GetVariables()["results"]
return tCtx.OutputWriter().Put(ctx, ioutils.NewInMemoryOutputReader(
&pb.LiteralMap{
@@ -474,13 +474,13 @@ func MapExecutionStateToPhaseInfo(state ExecutionState) core.PhaseInfo {
if state.CreationFailureCount > 5 {
phaseInfo = core.PhaseInfoRetryableFailure("PrestoFailure", "Too many creation attempts", nil)
} else {
- phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+1), ConstructTaskInfo(state))
+ phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+1), ConstructTaskInfo(state)) // #nosec G115
}
case PhaseSubmitted:
- phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+2), ConstructTaskInfo(state))
+ phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+2), ConstructTaskInfo(state)) // #nosec G115
case PhaseQuerySucceeded:
if state.QueryCount < 5 {
- phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+3), ConstructTaskInfo(state))
+ phaseInfo = core.PhaseInfoRunning(uint32(3*state.QueryCount+3), ConstructTaskInfo(state)) // #nosec G115
} else {
phaseInfo = core.PhaseInfoSuccess(ConstructTaskInfo(state))
}
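
The uint32 conversions annotated with #nosec G115 above implement the Presto phase-version scheme: each completed query bumps QueryCount, and the +1/+2/+3 offsets distinguish the successive sub-phases of one attempt (the submitted and query-succeeded cases are visible in the hunk), so reported phase versions only ever increase. A rough sketch of the same arithmetic, assuming the offsets shown above:

package main

import "fmt"

// phaseVersion mirrors the 3*QueryCount+offset scheme: offsets 1, 2, and 3
// mark the sub-phases of one query attempt. The result grows monotonically,
// which is what lets propeller treat every update as new information.
func phaseVersion(queryCount, offset int) uint32 {
    return uint32(3*queryCount + offset) // #nosec G115 -- counts stay small
}

func main() {
    for attempt := 0; attempt < 2; attempt++ {
        for offset := 1; offset <= 3; offset++ {
            fmt.Println(phaseVersion(attempt, offset)) // 1 2 3 4 5 6
        }
    }
}
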
diff --git a/flyteplugins/go/tasks/plugins/presto/execution_state_test.go b/flyteplugins/go/tasks/plugins/presto/execution_state_test.go
index 4d20d64ee6..e89f1af4ae 100644
--- a/flyteplugins/go/tasks/plugins/presto/execution_state_test.go
+++ b/flyteplugins/go/tasks/plugins/presto/execution_state_test.go
@@ -84,7 +84,7 @@ func TestConstructTaskLog(t *testing.T) {
u, err := url.Parse(expected)
assert.NoError(t, err)
taskLog := ConstructTaskLog(ExecutionState{CommandID: "123", URI: u.String()})
- assert.Equal(t, expected, taskLog.Uri)
+ assert.Equal(t, expected, taskLog.GetUri())
}
func TestConstructTaskInfo(t *testing.T) {
@@ -103,7 +103,7 @@ func TestConstructTaskInfo(t *testing.T) {
}
taskInfo := ConstructTaskInfo(e)
- assert.Equal(t, "https://prestoproxy-internal.flyteorg.net:443", taskInfo.Logs[0].Uri)
+ assert.Equal(t, "https://prestoproxy-internal.flyteorg.net:443", taskInfo.Logs[0].GetUri())
assert.Len(t, taskInfo.ExternalResources, 1)
assert.Equal(t, taskInfo.ExternalResources[0].ExternalID, "123")
}
diff --git a/flyteplugins/go/tasks/plugins/presto/executions_cache.go b/flyteplugins/go/tasks/plugins/presto/executions_cache.go
index cc5248c0f2..b41df763d0 100644
--- a/flyteplugins/go/tasks/plugins/presto/executions_cache.go
+++ b/flyteplugins/go/tasks/plugins/presto/executions_cache.go
@@ -36,7 +36,8 @@ func NewPrestoExecutionsCache(
scope: scope,
cfg: cfg,
}
- autoRefreshCache, err := cache.NewAutoRefreshCache(cfg.RefreshCacheConfig.Name, q.SyncPrestoQuery, workqueue.DefaultControllerRateLimiter(), cfg.RefreshCacheConfig.SyncPeriod.Duration, cfg.RefreshCacheConfig.Workers, cfg.RefreshCacheConfig.LruCacheSize, scope)
+ // #nosec G115
+ autoRefreshCache, err := cache.NewAutoRefreshCache(cfg.RefreshCacheConfig.Name, q.SyncPrestoQuery, workqueue.DefaultControllerRateLimiter(), cfg.RefreshCacheConfig.SyncPeriod.Duration, uint(cfg.RefreshCacheConfig.Workers), uint(cfg.RefreshCacheConfig.LruCacheSize), scope)
if err != nil {
logger.Errorf(ctx, "Could not create AutoRefreshCache in Executor. [%s]", err)
return q, errors.Wrapf(errors.CacheFailed, err, "Error creating AutoRefreshCache")
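
gosec's G115 rule flags integer conversions that may overflow or change sign, which is why the new uint(cfg.RefreshCacheConfig.Workers) and uint(cfg.RefreshCacheConfig.LruCacheSize) casts carry a #nosec marker. Where the value comes from user-editable config, an alternative is to validate before converting rather than suppressing the warning; a hedged sketch of that option (not what this PR does):

package main

import (
    "errors"
    "fmt"
)

// toUint converts a configured int to uint, rejecting negatives instead of
// letting the cast wrap around to a huge value. This is a defensive
// alternative to annotating the cast with #nosec G115.
func toUint(name string, v int) (uint, error) {
    if v < 0 {
        return 0, errors.New(name + " must be non-negative")
    }
    return uint(v), nil
}

func main() {
    workers, err := toUint("workers", 10)
    fmt.Println(workers, err) // 10 <nil>

    _, err = toUint("workers", -1)
    fmt.Println(err) // workers must be non-negative
}
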
diff --git a/flyteplugins/go/tasks/plugins/testing/echo.go b/flyteplugins/go/tasks/plugins/testing/echo.go
index 09c4dc53b1..00ca339f20 100644
--- a/flyteplugins/go/tasks/plugins/testing/echo.go
+++ b/flyteplugins/go/tasks/plugins/testing/echo.go
@@ -104,7 +104,7 @@ func copyInputsToOutputs(ctx context.Context, tCtx core.TaskExecutionContext) (c
outputLiterals := make(map[string]*idlcore.Literal, len(inputToOutputVariableMappings))
for inputVariableName, outputVariableName := range inputToOutputVariableMappings {
- outputLiterals[outputVariableName] = inputLiterals.Literals[inputVariableName]
+ outputLiterals[outputVariableName] = inputLiterals.GetLiterals()[inputVariableName]
}
outputLiteralMap := &idlcore.LiteralMap{
@@ -132,12 +132,12 @@ func compileInputToOutputVariableMappings(ctx context.Context, tCtx core.TaskExe
}
var inputs, outputs map[string]*idlcore.Variable
- if taskTemplate.Interface != nil {
- if taskTemplate.Interface.Inputs != nil {
- inputs = taskTemplate.Interface.Inputs.Variables
+ if taskTemplate.GetInterface() != nil {
+ if taskTemplate.GetInterface().GetInputs() != nil {
+ inputs = taskTemplate.GetInterface().GetInputs().GetVariables()
}
- if taskTemplate.Interface.Outputs != nil {
- outputs = taskTemplate.Interface.Outputs.Variables
+ if taskTemplate.GetInterface().GetOutputs() != nil {
+ outputs = taskTemplate.GetInterface().GetOutputs().GetVariables()
}
}
diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/client.go b/flyteplugins/go/tasks/plugins/webapi/agent/client.go
index 148113fb38..04c464eaa3 100644
--- a/flyteplugins/go/tasks/plugins/webapi/agent/client.go
+++ b/flyteplugins/go/tasks/plugins/webapi/agent/client.go
@@ -130,16 +130,16 @@ func getAgentRegistry(ctx context.Context, cs *ClientSet) Registry {
agentSupportedTaskCategories := make(map[string]struct{})
for _, agent := range res.GetAgents() {
- deprecatedSupportedTaskTypes := agent.SupportedTaskTypes
+ deprecatedSupportedTaskTypes := agent.GetSupportedTaskTypes()
for _, supportedTaskType := range deprecatedSupportedTaskTypes {
- agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.IsSync}
+ agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.GetIsSync()}
newAgentRegistry[supportedTaskType] = map[int32]*Agent{defaultTaskTypeVersion: agent}
agentSupportedTaskCategories[supportedTaskType] = struct{}{}
}
- supportedTaskCategories := agent.SupportedTaskCategories
+ supportedTaskCategories := agent.GetSupportedTaskCategories()
for _, supportedCategory := range supportedTaskCategories {
- agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.IsSync}
+ agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.GetIsSync()}
supportedCategoryName := supportedCategory.GetName()
newAgentRegistry[supportedCategoryName] = map[int32]*Agent{supportedCategory.GetVersion(): agent}
agentSupportedTaskCategories[supportedCategoryName] = struct{}{}
diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/integration_test.go b/flyteplugins/go/tasks/plugins/webapi/agent/integration_test.go
index ba74fbf5d2..5348b71ebb 100644
--- a/flyteplugins/go/tasks/plugins/webapi/agent/integration_test.go
+++ b/flyteplugins/go/tasks/plugins/webapi/agent/integration_test.go
@@ -261,7 +261,7 @@ func newMockAsyncAgentPlugin() webapi.PluginEntry {
mockCreateRequestMatcher := mock.MatchedBy(func(request *admin.CreateTaskRequest) bool {
expectedArgs := []string{"pyflyte-fast-execute", "--output-prefix", "/tmp/123"}
- return slices.Equal(request.Template.GetContainer().Args, expectedArgs)
+ return slices.Equal(request.GetTemplate().GetContainer().GetArgs(), expectedArgs)
})
asyncAgentClient.On("CreateTask", mock.Anything, mockCreateRequestMatcher).Return(&admin.CreateTaskResponse{
ResourceMeta: []byte{1, 2, 3, 4}}, nil)
diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go
index 4fffe2bee5..86e700619f 100644
--- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go
+++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go
@@ -94,8 +94,8 @@ func (p *Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContext
OutputPath: taskCtx.OutputWriter(),
Task: taskCtx.TaskReader(),
}
- argTemplate = taskTemplate.GetContainer().Args
- modifiedArgs, err := template.Render(ctx, taskTemplate.GetContainer().Args, templateParameters)
+ argTemplate = taskTemplate.GetContainer().GetArgs()
+ modifiedArgs, err := template.Render(ctx, taskTemplate.GetContainer().GetArgs(), templateParameters)
if err != nil {
return nil, nil, err
}
@@ -107,7 +107,7 @@ func (p *Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContext
}
outputPrefix := taskCtx.OutputWriter().GetOutputPrefixPath().String()
- taskCategory := admin.TaskCategory{Name: taskTemplate.Type, Version: taskTemplate.TaskTypeVersion}
+ taskCategory := admin.TaskCategory{Name: taskTemplate.GetType(), Version: taskTemplate.GetTaskTypeVersion()}
agent, isSync := p.getFinalAgent(&taskCategory, p.cfg)
taskExecutionMetadata := buildTaskExecutionMetadata(taskCtx.TaskExecutionMetadata())
@@ -195,11 +195,11 @@ func (p *Plugin) ExecuteTaskSync(
resource := in.GetHeader().GetResource()
return nil, ResourceWrapper{
- Phase: resource.Phase,
- Outputs: resource.Outputs,
- Message: resource.Message,
- LogLinks: resource.LogLinks,
- CustomInfo: resource.CustomInfo,
+ Phase: resource.GetPhase(),
+ Outputs: resource.GetOutputs(),
+ Message: resource.GetMessage(),
+ LogLinks: resource.GetLogLinks(),
+ CustomInfo: resource.GetCustomInfo(),
}, err
}
@@ -215,7 +215,7 @@ func (p *Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest web
defer cancel()
request := &admin.GetTaskRequest{
- TaskType: metadata.TaskCategory.Name,
+ TaskType: metadata.TaskCategory.GetName(),
TaskCategory: &metadata.TaskCategory,
ResourceMeta: metadata.AgentResourceMeta,
}
@@ -225,12 +225,12 @@ func (p *Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest web
}
return ResourceWrapper{
- Phase: res.Resource.Phase,
- State: res.Resource.State,
- Outputs: res.Resource.Outputs,
- Message: res.Resource.Message,
- LogLinks: res.Resource.LogLinks,
- CustomInfo: res.Resource.CustomInfo,
+ Phase: res.GetResource().GetPhase(),
+ State: res.GetResource().GetState(),
+ Outputs: res.GetResource().GetOutputs(),
+ Message: res.GetResource().GetMessage(),
+ LogLinks: res.GetResource().GetLogLinks(),
+ CustomInfo: res.GetResource().GetCustomInfo(),
}, nil
}
@@ -249,7 +249,7 @@ func (p *Plugin) Delete(ctx context.Context, taskCtx webapi.DeleteContext) error
defer cancel()
request := &admin.DeleteTaskRequest{
- TaskType: metadata.TaskCategory.Name,
+ TaskType: metadata.TaskCategory.GetName(),
TaskCategory: &metadata.TaskCategory,
ResourceMeta: metadata.AgentResourceMeta,
}
@@ -350,7 +350,7 @@ func (p *Plugin) getFinalAgent(taskCategory *admin.TaskCategory, cfg *Config) (*
p.mu.RLock()
defer p.mu.RUnlock()
- if agent, exists := p.registry[taskCategory.Name][taskCategory.Version]; exists {
+ if agent, exists := p.registry[taskCategory.GetName()][taskCategory.GetVersion()]; exists {
return agent.AgentDeployment, agent.IsSync
}
return &cfg.DefaultAgent, false
@@ -362,7 +362,7 @@ func writeOutput(ctx context.Context, taskCtx webapi.StatusContext, outputs *fly
return err
}
- if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil {
+ if taskTemplate.GetInterface() == nil || taskTemplate.GetInterface().GetOutputs() == nil || taskTemplate.GetInterface().GetOutputs().GetVariables() == nil {
logger.Debugf(ctx, "The task declares no outputs. Skipping writing the outputs.")
return nil
}
@@ -388,7 +388,7 @@ func buildTaskExecutionMetadata(taskExecutionMetadata core.TaskExecutionMetadata
Annotations: taskExecutionMetadata.GetAnnotations(),
K8SServiceAccount: taskExecutionMetadata.GetK8sServiceAccount(),
EnvironmentVariables: taskExecutionMetadata.GetEnvironmentVariables(),
- Identity: taskExecutionMetadata.GetSecurityContext().RunAs,
+ Identity: taskExecutionMetadata.GetSecurityContext().RunAs, //nolint:protogetter
}
}
diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/utils.go b/flyteplugins/go/tasks/plugins/webapi/athena/utils.go
index 761e81842a..1ed1fbaea4 100644
--- a/flyteplugins/go/tasks/plugins/webapi/athena/utils.go
+++ b/flyteplugins/go/tasks/plugins/webapi/athena/utils.go
@@ -19,12 +19,12 @@ func writeOutput(ctx context.Context, tCtx webapi.StatusContext, externalLocatio
return err
}
- if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil {
+ if taskTemplate.GetInterface() == nil || taskTemplate.GetInterface().GetOutputs() == nil || taskTemplate.GetInterface().GetOutputs().GetVariables() == nil {
logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.")
return nil
}
- resultsSchema, exists := taskTemplate.Interface.Outputs.Variables["results"]
+ resultsSchema, exists := taskTemplate.GetInterface().GetOutputs().GetVariables()["results"]
if !exists {
logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.")
return nil
@@ -56,11 +56,11 @@ type QueryInfo struct {
}
func validateHiveQuery(hiveQuery pluginsIdl.QuboleHiveJob) error {
- if hiveQuery.Query == nil {
+ if hiveQuery.GetQuery() == nil {
return errors.Errorf(errors.BadTaskSpecification, "Query is a required field.")
}
- if len(hiveQuery.Query.Query) == 0 {
+ if len(hiveQuery.GetQuery().GetQuery()) == 0 {
return errors.Errorf(errors.BadTaskSpecification, "Query statement is a required field.")
}
@@ -68,7 +68,7 @@ func validateHiveQuery(hiveQuery pluginsIdl.QuboleHiveJob) error {
}
func validatePrestoQuery(prestoQuery pluginsIdl.PrestoQuery) error {
- if len(prestoQuery.Statement) == 0 {
+ if len(prestoQuery.GetStatement()) == 0 {
return errors.Errorf(errors.BadTaskSpecification, "Statement is a required field.")
}
@@ -81,7 +81,7 @@ func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReade
return QueryInfo{}, err
}
- switch task.Type {
+ switch task.GetType() {
case "hive":
custom := task.GetCustom()
hiveQuery := pluginsIdl.QuboleHiveJob{}
@@ -95,8 +95,8 @@ func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReade
}
outputs, err := template.Render(ctx, []string{
- hiveQuery.Query.Query,
- hiveQuery.ClusterLabel,
+ hiveQuery.GetQuery().GetQuery(),
+ hiveQuery.GetClusterLabel(),
}, template.Parameters{
TaskExecMetadata: tCtx.TaskExecutionMetadata(),
Inputs: tCtx.InputReader(),
@@ -124,10 +124,10 @@ func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReade
}
outputs, err := template.Render(ctx, []string{
- prestoQuery.RoutingGroup,
- prestoQuery.Catalog,
- prestoQuery.Schema,
- prestoQuery.Statement,
+ prestoQuery.GetRoutingGroup(),
+ prestoQuery.GetCatalog(),
+ prestoQuery.GetSchema(),
+ prestoQuery.GetStatement(),
}, template.Parameters{
TaskExecMetadata: tCtx.TaskExecutionMetadata(),
Inputs: tCtx.InputReader(),
@@ -146,5 +146,5 @@ func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReade
}, nil
}
- return QueryInfo{}, errors.Errorf(ErrUser, "Unexpected task type [%v].", task.Type)
+ return QueryInfo{}, errors.Errorf(ErrUser, "Unexpected task type [%v].", task.GetType())
}
diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go
index ad7da5f042..fca1eee954 100644
--- a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go
+++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go
@@ -95,17 +95,17 @@ func (p Plugin) createImpl(ctx context.Context, taskCtx webapi.TaskExecutionCont
return nil, nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, "unable to get bigquery client")
}
- if taskTemplate.Type == bigqueryQueryJobTask {
+ if taskTemplate.GetType() == bigqueryQueryJobTask {
job, err = createQueryJob(jobID, taskTemplate.GetCustom(), inputs)
} else {
- err = pluginErrors.Errorf(pluginErrors.BadTaskSpecification, "unexpected task type [%v]", taskTemplate.Type)
+ err = pluginErrors.Errorf(pluginErrors.BadTaskSpecification, "unexpected task type [%v]", taskTemplate.GetType())
}
if err != nil {
return nil, nil, err
}
- job.Configuration.Query.Query = taskTemplate.GetSql().Statement
+ job.Configuration.Query.Query = taskTemplate.GetSql().GetStatement()
job.Configuration.Labels = taskCtx.TaskExecutionMetadata().GetLabels()
resp, err := client.Jobs.Insert(job.JobReference.ProjectId, job).Do()
@@ -317,12 +317,12 @@ func writeOutput(ctx context.Context, tCtx webapi.StatusContext, OutputLocation
return err
}
- if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil {
+ if taskTemplate.GetInterface() == nil || taskTemplate.GetInterface().GetOutputs() == nil || taskTemplate.GetInterface().GetOutputs().GetVariables() == nil {
logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.")
return nil
}
- resultsStructuredDatasetType, exists := taskTemplate.Interface.Outputs.Variables["results"]
+ resultsStructuredDatasetType, exists := taskTemplate.GetInterface().GetOutputs().GetVariables()["results"]
if !exists {
logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.")
return nil
diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go
index 939fe0577a..8682350986 100644
--- a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go
+++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go
@@ -105,9 +105,9 @@ func TestOutputWriter(t *testing.T) {
assert.NoError(t, err)
sd := literals.GetLiterals()["results"].GetScalar().GetStructuredDataset()
- assert.Equal(t, sd.Uri, outputLocation)
- assert.Equal(t, sd.Metadata.GetStructuredDatasetType().Columns[0].Name, "col1")
- assert.Equal(t, sd.Metadata.GetStructuredDatasetType().Columns[0].LiteralType.GetSimple(), flyteIdlCore.SimpleType_INTEGER)
+ assert.Equal(t, sd.GetUri(), outputLocation)
+ assert.Equal(t, sd.GetMetadata().GetStructuredDatasetType().GetColumns()[0].GetName(), "col1")
+ assert.Equal(t, sd.GetMetadata().GetStructuredDatasetType().GetColumns()[0].GetLiteralType().GetSimple(), flyteIdlCore.SimpleType_INTEGER)
if ee != nil {
assert.NoError(t, ds.WriteProtobuf(ctx, outputWriter.GetErrorPath(), storage.Options{}, ee))
@@ -307,9 +307,9 @@ func TestHandleErrorResult(t *testing.T) {
phaseInfo := handleErrorResult(test.reason, "message", &taskInfo)
assert.Equal(t, test.phase, phaseInfo.Phase())
- assert.Equal(t, test.reason, phaseInfo.Err().Code)
- assert.Equal(t, test.errorKind, phaseInfo.Err().Kind)
- assert.Equal(t, "message", phaseInfo.Err().Message)
+ assert.Equal(t, test.reason, phaseInfo.Err().GetCode())
+ assert.Equal(t, test.errorKind, phaseInfo.Err().GetKind())
+ assert.Equal(t, "message", phaseInfo.Err().GetMessage())
})
}
}
diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go
index 7ce788e0fe..fe558f9d0c 100644
--- a/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go
+++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go
@@ -155,7 +155,7 @@ func unmarshalQueryJobConfig(structObj *structpb.Struct) (*QueryJobConfig, error
}
func getJobConfigurationQuery(custom *QueryJobConfig, inputs *flyteIdlCore.LiteralMap) (*bigquery.JobConfigurationQuery, error) {
- queryParameters, err := getQueryParameters(inputs.Literals)
+ queryParameters, err := getQueryParameters(inputs.GetLiterals())
if err != nil {
return nil, pluginErrors.Errorf(pluginErrors.BadTaskSpecification, "unable build query parameters [%v]", err.Error())
@@ -216,7 +216,7 @@ func getQueryParameters(literalMap map[string]*flyteIdlCore.Literal) ([]*bigquer
func getQueryParameter(literal *flyteIdlCore.Literal) (*bigquery.QueryParameterType, *bigquery.QueryParameterValue, error) {
if scalar := literal.GetScalar(); scalar != nil {
if primitive := scalar.GetPrimitive(); primitive != nil {
- switch primitive.Value.(type) {
+ switch primitive.GetValue().(type) {
case *flyteIdlCore.Primitive_Integer:
integerType := bigquery.QueryParameterType{Type: "INT64"}
integerValue := bigquery.QueryParameterValue{
diff --git a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go
index 6ae9a1dbe5..d889392c59 100644
--- a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go
+++ b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go
@@ -96,8 +96,8 @@ func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextR
}
// override the default token in propeller
- if len(sparkJob.DatabricksToken) != 0 {
- token = sparkJob.DatabricksToken
+ if len(sparkJob.GetDatabricksToken()) != 0 {
+ token = sparkJob.GetDatabricksToken()
}
modifiedArgs, err := template.Render(ctx, container.GetArgs(), template.Parameters{
TaskExecMetadata: taskCtx.TaskExecutionMetadata(),
@@ -110,20 +110,20 @@ func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextR
}
databricksJob := make(map[string]interface{})
- err = utils.UnmarshalStructToObj(sparkJob.DatabricksConf, &databricksJob)
+ err = utils.UnmarshalStructToObj(sparkJob.GetDatabricksConf(), &databricksJob)
if err != nil {
- return nil, nil, fmt.Errorf("failed to unmarshal databricksJob: %v: %v", sparkJob.DatabricksConf, err)
+ return nil, nil, fmt.Errorf("failed to unmarshal databricksJob: %v: %v", sparkJob.GetDatabricksConf(), err)
}
// If "existing_cluster_id" is in databricks_job, then we don't need to set "new_cluster"
// Refer the docs here: https://docs.databricks.com/en/workflows/jobs/jobs-2.0-api.html#request-structure
if clusterConfig, ok := databricksJob[newCluster].(map[string]interface{}); ok {
if dockerConfig, ok := clusterConfig[dockerImage].(map[string]interface{}); !ok || dockerConfig[url] == nil {
- clusterConfig[dockerImage] = map[string]string{url: container.Image}
+ clusterConfig[dockerImage] = map[string]string{url: container.GetImage()}
}
- if clusterConfig[sparkConfig] == nil && len(sparkJob.SparkConf) != 0 {
- clusterConfig[sparkConfig] = sparkJob.SparkConf
+ if clusterConfig[sparkConfig] == nil && len(sparkJob.GetSparkConf()) != 0 {
+ clusterConfig[sparkConfig] = sparkJob.GetSparkConf()
}
}
databricksJob[sparkPythonTask] = map[string]interface{}{pythonFile: p.cfg.EntrypointFile, parameters: modifiedArgs}
@@ -299,7 +299,7 @@ func writeOutput(ctx context.Context, taskCtx webapi.StatusContext) error {
if err != nil {
return err
}
- if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil {
+ if taskTemplate.GetInterface() == nil || taskTemplate.GetInterface().GetOutputs() == nil || taskTemplate.GetInterface().GetOutputs().GetVariables() == nil {
logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.")
return nil
}
diff --git a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go
index 228914af93..3cdecf6872 100644
--- a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go
+++ b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go
@@ -148,7 +148,7 @@ func TestCreateTaskInfo(t *testing.T) {
taskInfo := createTaskInfo("run-id", "job-id", testInstance)
assert.Equal(t, 1, len(taskInfo.Logs))
- assert.Equal(t, taskInfo.Logs[0].Uri, "https://test-account.cloud.databricks.com/#job/job-id/run/run-id")
- assert.Equal(t, taskInfo.Logs[0].Name, "Databricks Console")
+ assert.Equal(t, taskInfo.Logs[0].GetUri(), "https://test-account.cloud.databricks.com/#job/job-id/run/run-id")
+ assert.Equal(t, taskInfo.Logs[0].GetName(), "Databricks Console")
})
}
diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go
index 02bf947fd4..c0728a79a7 100644
--- a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go
+++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go
@@ -84,7 +84,7 @@ func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextR
config := task.GetConfig()
outputs, err := template.Render(ctx, []string{
- task.GetSql().Statement,
+ task.GetSql().GetStatement(),
}, template.Parameters{
TaskExecMetadata: taskCtx.TaskExecutionMetadata(),
Inputs: taskCtx.InputReader(),
diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go
index 7657a9e315..3de8f8a6b8 100644
--- a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go
+++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go
@@ -57,8 +57,8 @@ func TestCreateTaskInfo(t *testing.T) {
taskInfo := createTaskInfo("d5493e36", "test-account")
assert.Equal(t, 1, len(taskInfo.Logs))
- assert.Equal(t, taskInfo.Logs[0].Uri, "https://test-account.snowflakecomputing.com/console#/monitoring/queries/detail?queryId=d5493e36")
- assert.Equal(t, taskInfo.Logs[0].Name, "Snowflake Console")
+ assert.Equal(t, taskInfo.Logs[0].GetUri(), "https://test-account.snowflakecomputing.com/console#/monitoring/queries/detail?queryId=d5493e36")
+ assert.Equal(t, taskInfo.Logs[0].GetName(), "Snowflake Console")
})
}
diff --git a/flytepropeller/.golangci.yml b/flytepropeller/.golangci.yml
index 6d13f4a3b6..77107079d0 100644
--- a/flytepropeller/.golangci.yml
+++ b/flytepropeller/.golangci.yml
@@ -1,35 +1,25 @@
-# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES.
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY:
-#
-# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst
-
run:
skip-dirs:
- pkg/client
-
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- - gas
+ - gosec
- gci
- goconst
- goimports
- - golint
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- staticcheck
- - structcheck
- typecheck
- unconvert
- unparam
- unused
- - varcheck
-
+ - protogetter
linters-settings:
gci:
custom-order: true
@@ -38,6 +28,8 @@ linters-settings:
- default
- prefix(github.com/flyteorg)
skip-generated: true
+ goconst:
+ ignore-tests: true
issues:
exclude:
- copylocks
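
This lint config drops linters that recent golangci-lint releases deprecated or removed (deadcode, golint, structcheck, varcheck), switches gas to its current name gosec, and enables protogetter, the linter motivating the Field-to-GetField() rewrites throughout this diff. Roughly, protogetter reports direct reads of protobuf message fields when a generated getter exists, while leaving writes alone; a small stand-in example (Event below is hand-written, not a real generated type):

package main

import "fmt"

// Event stands in for a generated protobuf message such as
// *event.NodeExecutionEvent; the getter mirrors protoc-gen-go output.
type Event struct {
    Phase int32
}

func (e *Event) GetPhase() int32 {
    if e == nil {
        return 0
    }
    return e.Phase
}

func main() {
    e := &Event{Phase: 3}
    // phase := e.Phase // on a real generated type, protogetter flags this read
    phase := e.GetPhase() // the rewrite applied throughout this diff
    e.Phase = 4           // writes keep direct field access; there is no getter to call
    fmt.Println(phase, e.GetPhase()) // 3 4
}
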
diff --git a/flytepropeller/cmd/kubectl-flyte/cmd/compile.go b/flytepropeller/cmd/kubectl-flyte/cmd/compile.go
index 056b546849..c91e10c183 100644
--- a/flytepropeller/cmd/kubectl-flyte/cmd/compile.go
+++ b/flytepropeller/cmd/kubectl-flyte/cmd/compile.go
@@ -76,18 +76,18 @@ func (c *CompileOpts) compileWorkflowCmd() error {
if err != nil {
return err
}
- err = ioutil.WriteFile(c.protoFile+".yaml", b, os.ModePerm)
+ err = os.WriteFile(c.protoFile+".yaml", b, os.ModePerm) // #nosec G306
if err != nil {
return err
}
}
- compiledTasks, err := compileTasks(wfClosure.Tasks)
+ compiledTasks, err := compileTasks(wfClosure.GetTasks())
if err != nil {
return err
}
- compileWfClosure, err := compiler.CompileWorkflow(wfClosure.Workflow, []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{})
+ compileWfClosure, err := compiler.CompileWorkflow(wfClosure.GetWorkflow(), []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{})
if err != nil {
return err
}
@@ -100,7 +100,7 @@ func (c *CompileOpts) compileWorkflowCmd() error {
}
if c.outputPath != "" {
- return ioutil.WriteFile(c.outputPath, o, os.ModePerm)
+ return os.WriteFile(c.outputPath, o, os.ModePerm) // #nosec G306
}
fmt.Printf("%v", string(o))
return nil
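
The kubectl-flyte commands also migrate from the long-deprecated io/ioutil helpers to their os equivalents, and the writes keep os.ModePerm (0777), which gosec's G306 rule flags; the #nosec comments acknowledge that. If world-writable output files are not actually needed, a tighter mode avoids the annotation entirely; a small sketch under that assumption:

package main

import (
    "log"
    "os"
)

func main() {
    data := []byte("compiled workflow\n")

    // 0o600 restricts the file to its owner, which clears gosec's default
    // G306 threshold without a #nosec annotation. The diff keeps os.ModePerm
    // to preserve the previous ioutil.WriteFile behaviour instead.
    if err := os.WriteFile("workflow.yaml", data, 0o600); err != nil {
        log.Fatal(err)
    }
}
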
diff --git a/flytepropeller/cmd/kubectl-flyte/cmd/create.go b/flytepropeller/cmd/kubectl-flyte/cmd/create.go
index 2feeb8ec8e..3cf463b604 100644
--- a/flytepropeller/cmd/kubectl-flyte/cmd/create.go
+++ b/flytepropeller/cmd/kubectl-flyte/cmd/create.go
@@ -160,12 +160,12 @@ func (c *CreateOpts) createWorkflowFromProto() error {
return err
}
- compiledTasks, err := compileTasks(wfClosure.Tasks)
+ compiledTasks, err := compileTasks(wfClosure.GetTasks())
if err != nil {
return err
}
- wf, err := compiler.CompileWorkflow(wfClosure.Workflow, []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{})
+ wf, err := compiler.CompileWorkflow(wfClosure.GetWorkflow(), []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{})
if err != nil {
return err
}
@@ -182,8 +182,8 @@ func (c *CreateOpts) createWorkflowFromProto() error {
if len(c.execID) > 0 {
executionID = &core.WorkflowExecutionIdentifier{
Name: c.execID,
- Domain: wfClosure.Workflow.Id.Domain,
- Project: wfClosure.Workflow.Id.Project,
+ Domain: wfClosure.GetWorkflow().GetId().GetDomain(),
+ Project: wfClosure.GetWorkflow().GetId().GetProject(),
}
}
diff --git a/flytepropeller/cmd/kubectl-flyte/cmd/create_test.go b/flytepropeller/cmd/kubectl-flyte/cmd/create_test.go
index 5036201482..65bb2ecae1 100644
--- a/flytepropeller/cmd/kubectl-flyte/cmd/create_test.go
+++ b/flytepropeller/cmd/kubectl-flyte/cmd/create_test.go
@@ -3,7 +3,6 @@ package cmd
import (
"encoding/json"
"flag"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -113,7 +112,7 @@ func generateSimpleWorkflow(t *testing.T) {
marshaller := &jsonpb.Marshaler{}
s, err := marshaller.MarshalToString(&closure)
assert.NoError(t, err)
- assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", "workflow.json.golden"), []byte(s), os.ModePerm))
+ assert.NoError(t, os.WriteFile(filepath.Join("testdata", "workflow.json.golden"), []byte(s), os.ModePerm)) // #nosec G306
m := map[string]interface{}{}
err = json.Unmarshal([]byte(s), &m)
@@ -121,11 +120,11 @@ func generateSimpleWorkflow(t *testing.T) {
b, err := yaml.Marshal(m)
assert.NoError(t, err)
- assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", "workflow.yaml.golden"), b, os.ModePerm))
+ assert.NoError(t, os.WriteFile(filepath.Join("testdata", "workflow.yaml.golden"), b, os.ModePerm)) // #nosec G306
raw, err := proto.Marshal(&closure)
assert.NoError(t, err)
- assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", "workflow.pb.golden"), raw, os.ModePerm))
+ assert.NoError(t, os.WriteFile(filepath.Join("testdata", "workflow.pb.golden"), raw, os.ModePerm)) // #nosec G306
}
func generateWorkflowWithInputs(t *testing.T) {
@@ -242,7 +241,7 @@ func marshalGolden(t *testing.T, message proto.Message, filename string) {
marshaller := &jsonpb.Marshaler{}
s, err := marshaller.MarshalToString(message)
assert.NoError(t, err)
- assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", filename+".json.golden"), []byte(s), os.ModePerm))
+ assert.NoError(t, os.WriteFile(filepath.Join("testdata", filename+".json.golden"), []byte(s), os.ModePerm)) // #nosec G306
m := map[string]interface{}{}
err = json.Unmarshal([]byte(s), &m)
@@ -250,28 +249,28 @@ func marshalGolden(t *testing.T, message proto.Message, filename string) {
b, err := yaml.Marshal(m)
assert.NoError(t, err)
- assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", filename+".yaml.golden"), b, os.ModePerm))
+ assert.NoError(t, os.WriteFile(filepath.Join("testdata", filename+".yaml.golden"), b, os.ModePerm)) // #nosec G306
raw, err := proto.Marshal(message)
assert.NoError(t, err)
- assert.NoError(t, ioutil.WriteFile(filepath.Join("testdata", filename+".pb.golden"), raw, os.ModePerm))
+ assert.NoError(t, os.WriteFile(filepath.Join("testdata", filename+".pb.golden"), raw, os.ModePerm)) // #nosec G306
}
func testCompile(t *testing.T) {
f := func(t *testing.T, filePath, format string) {
- raw, err := ioutil.ReadFile(filepath.Join("testdata", filePath))
+ raw, err := os.ReadFile(filepath.Join("testdata", filePath))
assert.NoError(t, err)
wf := &core.WorkflowClosure{}
err = unmarshal(raw, format, wf)
assert.NoError(t, err)
assert.NotNil(t, wf)
- assert.Equal(t, 2, len(wf.Tasks))
- if len(wf.Tasks) == 2 {
- c := wf.Tasks[0].GetContainer()
+ assert.Equal(t, 2, len(wf.GetTasks()))
+ if len(wf.GetTasks()) == 2 {
+ c := wf.GetTasks()[0].GetContainer()
assert.NotNil(t, c)
- compiledTasks, err := compileTasks(wf.Tasks)
+ compiledTasks, err := compileTasks(wf.GetTasks())
assert.NoError(t, err)
- compiledWf, err := compiler.CompileWorkflow(wf.Workflow, []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{})
+ compiledWf, err := compiler.CompileWorkflow(wf.GetWorkflow(), []*core.WorkflowTemplate{}, compiledTasks, []common.InterfaceProvider{})
assert.NoError(t, err)
_, err = k8s.BuildFlyteWorkflow(compiledWf, nil, nil, "")
assert.NoError(t, err)
diff --git a/flytepropeller/events/admin_eventsink.go b/flytepropeller/events/admin_eventsink.go
index 3da6cca421..cc9c57661c 100644
--- a/flytepropeller/events/admin_eventsink.go
+++ b/flytepropeller/events/admin_eventsink.go
@@ -116,17 +116,17 @@ func IDFromMessage(message proto.Message) ([]byte, error) {
var id string
switch eventMessage := message.(type) {
case *event.WorkflowExecutionEvent:
- wid := eventMessage.ExecutionId
- id = fmt.Sprintf("%s:%s:%s:%d", wid.Project, wid.Domain, wid.Name, eventMessage.Phase)
+ wid := eventMessage.GetExecutionId()
+ id = fmt.Sprintf("%s:%s:%s:%d", wid.GetProject(), wid.GetDomain(), wid.GetName(), eventMessage.GetPhase())
case *event.NodeExecutionEvent:
- nid := eventMessage.Id
- wid := nid.ExecutionId
- id = fmt.Sprintf("%s:%s:%s:%s:%s:%d", wid.Project, wid.Domain, wid.Name, nid.NodeId, eventMessage.RetryGroup, eventMessage.Phase)
+ nid := eventMessage.GetId()
+ wid := nid.GetExecutionId()
+ id = fmt.Sprintf("%s:%s:%s:%s:%s:%d", wid.GetProject(), wid.GetDomain(), wid.GetName(), nid.GetNodeId(), eventMessage.GetRetryGroup(), eventMessage.GetPhase())
case *event.TaskExecutionEvent:
- tid := eventMessage.TaskId
- nid := eventMessage.ParentNodeExecutionId
- wid := nid.ExecutionId
- id = fmt.Sprintf("%s:%s:%s:%s:%s:%s:%d:%d:%d", wid.Project, wid.Domain, wid.Name, nid.NodeId, tid.Name, tid.Version, eventMessage.RetryAttempt, eventMessage.Phase, eventMessage.PhaseVersion)
+ tid := eventMessage.GetTaskId()
+ nid := eventMessage.GetParentNodeExecutionId()
+ wid := nid.GetExecutionId()
+ id = fmt.Sprintf("%s:%s:%s:%s:%s:%s:%d:%d:%d", wid.GetProject(), wid.GetDomain(), wid.GetName(), nid.GetNodeId(), tid.GetName(), tid.GetVersion(), eventMessage.GetRetryAttempt(), eventMessage.GetPhase(), eventMessage.GetPhaseVersion())
default:
return nil, fmt.Errorf("unknown event type [%s]", eventMessage.String())
}
@@ -140,7 +140,7 @@ func initializeAdminClientFromConfig(ctx context.Context, config *Config) (clien
grpcOptions := []grpcRetry.CallOption{
grpcRetry.WithBackoff(grpcRetry.BackoffExponentialWithJitter(time.Duration(config.BackoffScalar)*time.Millisecond, config.GetBackoffJitter(ctx))),
- grpcRetry.WithMax(uint(config.MaxRetries)),
+ grpcRetry.WithMax(uint(config.MaxRetries)), // #nosec G115
}
opt := grpc.WithChainUnaryInterceptor(
diff --git a/flytepropeller/events/admin_eventsink_test.go b/flytepropeller/events/admin_eventsink_test.go
index 510371d056..e3a0d57dba 100644
--- a/flytepropeller/events/admin_eventsink_test.go
+++ b/flytepropeller/events/admin_eventsink_test.go
@@ -86,7 +86,7 @@ func TestAdminWorkflowEvent(t *testing.T) {
"CreateWorkflowEvent",
ctx,
mock.MatchedBy(func(req *admin.WorkflowExecutionEventRequest) bool {
- return req.Event == wfEvent
+ return req.GetEvent() == wfEvent
},
)).Return(&admin.WorkflowExecutionEventResponse{}, nil)
@@ -104,7 +104,7 @@ func TestAdminNodeEvent(t *testing.T) {
"CreateNodeEvent",
ctx,
mock.MatchedBy(func(req *admin.NodeExecutionEventRequest) bool {
- return req.Event == nodeEvent
+ return req.GetEvent() == nodeEvent
}),
).Return(&admin.NodeExecutionEventResponse{}, nil)
@@ -122,7 +122,7 @@ func TestAdminTaskEvent(t *testing.T) {
"CreateTaskEvent",
ctx,
mock.MatchedBy(func(req *admin.TaskExecutionEventRequest) bool {
- return req.Event == taskEvent
+ return req.GetEvent() == taskEvent
}),
).Return(&admin.TaskExecutionEventResponse{}, nil)
@@ -159,7 +159,7 @@ func TestAdminRateLimitError(t *testing.T) {
"CreateTaskEvent",
ctx,
mock.MatchedBy(func(req *admin.TaskExecutionEventRequest) bool {
- return req.Event == taskEvent
+ return req.GetEvent() == taskEvent
}),
).Return(&admin.TaskExecutionEventResponse{}, nil)
diff --git a/flytepropeller/events/errors/errors.go b/flytepropeller/events/errors/errors.go
index 2d3e02e0df..11c603bad8 100644
--- a/flytepropeller/events/errors/errors.go
+++ b/flytepropeller/events/errors/errors.go
@@ -74,7 +74,7 @@ func WrapError(err error) error {
phase := reason.AlreadyInTerminalState.GetCurrentPhase()
return wrapf(EventAlreadyInTerminalStateError, err, fmt.Sprintf("conflicting events; destination: %v", phase))
case *admin.EventFailureReason_IncompatibleCluster:
- return wrapf(EventIncompatibleCusterError, err, fmt.Sprintf("conflicting execution cluster; expected: %v", reason.IncompatibleCluster.Cluster))
+ return wrapf(EventIncompatibleCusterError, err, fmt.Sprintf("conflicting execution cluster; expected: %v", reason.IncompatibleCluster.GetCluster()))
default:
logger.Warnf(context.Background(), "found unexpected type in details of grpc status: %v", reason)
}
diff --git a/flytepropeller/events/event_recorder.go b/flytepropeller/events/event_recorder.go
index 310797f081..9390e04bf4 100644
--- a/flytepropeller/events/event_recorder.go
+++ b/flytepropeller/events/event_recorder.go
@@ -86,8 +86,8 @@ func (r *eventRecorder) RecordWorkflowEvent(ctx context.Context, e *event.Workfl
// If error message too large, truncate to mitigate grpc message size limit. Split the truncated size equally between
// the beginning and the end of the message to capture the most relevant information.
func truncateErrorMessage(err *core.ExecutionError, length int) {
- if len(err.Message) > length {
- err.Message = fmt.Sprintf("%s\n%s\n%s", err.Message[:length/2], truncationIndicator, err.Message[(len(err.Message)-length/2):])
+ if len(err.GetMessage()) > length {
+ err.Message = fmt.Sprintf("%s\n%s\n%s", err.GetMessage()[:length/2], truncationIndicator, err.GetMessage()[(len(err.GetMessage())-length/2):])
}
}
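
truncateErrorMessage keeps the first and last length/2 bytes of an oversized error message and splices the truncation indicator between them, so both the start and the tail of the failure survive the event size limit. A small self-contained sketch of the same idea, with a hypothetical indicator string:

package main

import "fmt"

const truncationIndicator = "... <truncated> ..."

// truncate mirrors the recorder's approach: keep length/2 bytes from each end
// of the message and join them with the indicator.
func truncate(msg string, length int) string {
    if len(msg) <= length {
        return msg
    }
    return fmt.Sprintf("%s\n%s\n%s", msg[:length/2], truncationIndicator, msg[len(msg)-length/2:])
}

func main() {
    long := "head-of-error ................................ tail-of-error"
    // Keeps the first 10 and last 10 bytes around the indicator.
    fmt.Println(truncate(long, 20))
}
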
diff --git a/flytepropeller/events/event_recorder_test.go b/flytepropeller/events/event_recorder_test.go
index 2b633b72ff..32c1193361 100644
--- a/flytepropeller/events/event_recorder_test.go
+++ b/flytepropeller/events/event_recorder_test.go
@@ -96,6 +96,6 @@ func TestTruncateErrorMessage(t *testing.T) {
}
truncateErrorMessage(&executionError, length)
- assert.True(t, len(executionError.Message) <= length+len(truncationIndicator)+2)
+ assert.True(t, len(executionError.GetMessage()) <= length+len(truncationIndicator)+2)
}
}
diff --git a/flytepropeller/events/eventsink_test.go b/flytepropeller/events/eventsink_test.go
index 46aa5e46db..d488398d7b 100644
--- a/flytepropeller/events/eventsink_test.go
+++ b/flytepropeller/events/eventsink_test.go
@@ -62,11 +62,11 @@ func TestFileEvent(t *testing.T) {
taskEvent := &event.TaskExecutionEvent{
TaskId: &core.Identifier{
ResourceType: core.ResourceType_TASK,
- Project: executionID.Project,
- Domain: executionID.Domain,
- Name: executionID.Name,
+ Project: executionID.GetProject(),
+ Domain: executionID.GetDomain(),
+ Name: executionID.GetName(),
},
- ParentNodeExecutionId: nodeEvent.Id,
+ ParentNodeExecutionId: nodeEvent.GetId(),
Phase: core.TaskExecution_FAILED,
OccurredAt: now,
}
diff --git a/flytepropeller/events/local_eventsink.go b/flytepropeller/events/local_eventsink.go
index fdcd5408a4..2300942bab 100644
--- a/flytepropeller/events/local_eventsink.go
+++ b/flytepropeller/events/local_eventsink.go
@@ -26,13 +26,13 @@ func (s *localSink) Sink(ctx context.Context, message proto.Message) error {
switch e := message.(type) {
case *event.WorkflowExecutionEvent:
eventOutput = fmt.Sprintf("[--WF EVENT--] %s, Phase: %s, OccuredAt: %s\n",
- e.ExecutionId, e.Phase, e.OccurredAt.AsTime().String())
+ e.GetExecutionId(), e.GetPhase(), e.GetOccurredAt().AsTime().String())
case *event.NodeExecutionEvent:
eventOutput = fmt.Sprintf("[--NODE EVENT--] %s, Phase: %s, OccuredAt: %s\n",
- e.Id, e.Phase, e.OccurredAt.AsTime().String())
+ e.GetId(), e.GetPhase(), e.GetOccurredAt().AsTime().String())
case *event.TaskExecutionEvent:
eventOutput = fmt.Sprintf("[--TASK EVENT--] %s,%s, Phase: %s, OccuredAt: %s\n",
- e.TaskId, e.ParentNodeExecutionId, e.Phase, e.OccurredAt.AsTime().String())
+ e.GetTaskId(), e.GetParentNodeExecutionId(), e.GetPhase(), e.GetOccurredAt().AsTime().String())
}
return s.writer.Write(ctx, eventOutput)
diff --git a/flytepropeller/events/node_event_recorder.go b/flytepropeller/events/node_event_recorder.go
index 8beb488ce6..c283401614 100644
--- a/flytepropeller/events/node_event_recorder.go
+++ b/flytepropeller/events/node_event_recorder.go
@@ -69,7 +69,7 @@ func (r *nodeEventRecorder) RecordNodeEvent(ctx context.Context, ev *event.NodeE
logger.Infof(ctx, "Failed to record node event [%+v] with err: %v", ev, err)
// Only attempt to retry sending an event in the case we tried to send raw output data inline
if eventConfig.FallbackToOutputReference && rawOutputPolicy == config.RawOutputPolicyInline {
- logger.Infof(ctx, "Falling back to sending node event outputs by reference for [%+v]", ev.Id)
+ logger.Infof(ctx, "Falling back to sending node event outputs by reference for [%+v]", ev.GetId())
return r.handleFailure(ctx, origEvent, err)
}
return err
diff --git a/flytepropeller/events/node_event_recorder_test.go b/flytepropeller/events/node_event_recorder_test.go
index 5d2025b525..d3321d649e 100644
--- a/flytepropeller/events/node_event_recorder_test.go
+++ b/flytepropeller/events/node_event_recorder_test.go
@@ -82,7 +82,7 @@ func TestRecordNodeEvent_Success_InlineOutputs(t *testing.T) {
store: mockStore,
}
err := recorder.RecordNodeEvent(ctx, getReferenceNodeEv(), inlineEventConfig)
- assert.Equal(t, deckURI, nodeEvent.DeckUri)
+ assert.Equal(t, deckURI, nodeEvent.GetDeckUri())
assert.NoError(t, err)
}
diff --git a/flytepropeller/events/task_event_recorder.go b/flytepropeller/events/task_event_recorder.go
index 8b531ae85f..3882802dd4 100644
--- a/flytepropeller/events/task_event_recorder.go
+++ b/flytepropeller/events/task_event_recorder.go
@@ -69,7 +69,7 @@ func (r *taskEventRecorder) RecordTaskEvent(ctx context.Context, ev *event.TaskE
logger.Infof(ctx, "Failed to record task event [%+v] with err: %v", ev, err)
// Only attempt to retry sending an event in the case we tried to send raw output data inline
if eventConfig.FallbackToOutputReference && rawOutputPolicy == config.RawOutputPolicyInline {
- logger.Infof(ctx, "Falling back to sending task event outputs by reference for [%+v]", ev.TaskId)
+ logger.Infof(ctx, "Falling back to sending task event outputs by reference for [%+v]", ev.GetTaskId())
return r.handleFailure(ctx, origEvent, err)
}
return err
diff --git a/flytepropeller/events/workflow_event_recorder.go b/flytepropeller/events/workflow_event_recorder.go
index f0f48a7f9d..5e56799925 100644
--- a/flytepropeller/events/workflow_event_recorder.go
+++ b/flytepropeller/events/workflow_event_recorder.go
@@ -69,7 +69,7 @@ func (r *workflowEventRecorder) RecordWorkflowEvent(ctx context.Context, ev *eve
logger.Infof(ctx, "Failed to record workflow event [%+v] with err: %v", ev, err)
// Only attempt to retry sending an event in the case we tried to send raw output data inline
if eventConfig.FallbackToOutputReference && rawOutputPolicy == config.RawOutputPolicyInline {
- logger.Infof(ctx, "Falling back to sending workflow event outputs by reference for [%+v]", ev.ExecutionId)
+ logger.Infof(ctx, "Falling back to sending workflow event outputs by reference for [%+v]", ev.GetExecutionId())
return r.handleFailure(ctx, origEvent, err)
}
return err
diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/branch_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/branch_test.go
index 5fd2a14218..b10c704409 100644
--- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/branch_test.go
+++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/branch_test.go
@@ -17,7 +17,7 @@ func TestMarshalUnMarshal_BranchTask(t *testing.T) {
err = json.Unmarshal(r, &o)
assert.NoError(t, err)
assert.NotNil(t, o.BranchNode.If)
- assert.Equal(t, core.ComparisonExpression_GT, o.BranchNode.If.Condition.BooleanExpression.GetComparison().Operator)
+ assert.Equal(t, core.ComparisonExpression_GT, o.GetBranchNode().GetIf().GetCondition().GetComparison().GetOperator())
assert.Equal(t, 1, len(o.InputBindings))
raw, err := json.Marshal(o)
if assert.NoError(t, err) {
diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/identifier_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/identifier_test.go
index 1267aec09b..b7bafaacb3 100644
--- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/identifier_test.go
+++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/identifier_test.go
@@ -100,16 +100,16 @@ func TestTaskExecutionIdentifier_DeepCopyInto(t *testing.T) {
teIdentifierCopy := TaskExecutionIdentifier{}
teIdentifier.DeepCopyInto(&teIdentifierCopy)
- assert.Equal(t, teIdentifier.TaskId.ResourceType, teIdentifierCopy.TaskId.ResourceType)
- assert.Equal(t, teIdentifier.TaskId.Project, teIdentifierCopy.TaskId.Project)
- assert.Equal(t, teIdentifier.TaskId.Domain, teIdentifierCopy.TaskId.Domain)
- assert.Equal(t, teIdentifier.TaskId.Name, teIdentifierCopy.TaskId.Name)
- assert.Equal(t, teIdentifier.TaskId.Version, teIdentifierCopy.TaskId.Version)
- assert.Equal(t, teIdentifier.TaskId.Org, teIdentifierCopy.TaskId.Org)
- assert.Equal(t, teIdentifier.NodeExecutionId.ExecutionId.Project, teIdentifierCopy.NodeExecutionId.ExecutionId.Project)
- assert.Equal(t, teIdentifier.NodeExecutionId.ExecutionId.Domain, teIdentifierCopy.NodeExecutionId.ExecutionId.Domain)
- assert.Equal(t, teIdentifier.NodeExecutionId.ExecutionId.Name, teIdentifierCopy.NodeExecutionId.ExecutionId.Name)
- assert.Equal(t, teIdentifier.NodeExecutionId.ExecutionId.Org, teIdentifierCopy.NodeExecutionId.ExecutionId.Org)
- assert.Equal(t, teIdentifier.NodeExecutionId.NodeId, teIdentifierCopy.NodeExecutionId.NodeId)
+ assert.Equal(t, teIdentifier.TaskId.GetResourceType(), teIdentifierCopy.TaskId.GetResourceType())
+ assert.Equal(t, teIdentifier.TaskId.GetProject(), teIdentifierCopy.TaskId.GetProject())
+ assert.Equal(t, teIdentifier.TaskId.GetDomain(), teIdentifierCopy.TaskId.GetDomain())
+ assert.Equal(t, teIdentifier.TaskId.GetName(), teIdentifierCopy.TaskId.GetName())
+ assert.Equal(t, teIdentifier.TaskId.GetVersion(), teIdentifierCopy.TaskId.GetVersion())
+ assert.Equal(t, teIdentifier.TaskId.GetOrg(), teIdentifierCopy.TaskId.GetOrg())
+ assert.Equal(t, teIdentifier.NodeExecutionId.GetExecutionId().GetProject(), teIdentifierCopy.NodeExecutionId.GetExecutionId().GetProject())
+ assert.Equal(t, teIdentifier.NodeExecutionId.GetExecutionId().GetDomain(), teIdentifierCopy.NodeExecutionId.GetExecutionId().GetDomain())
+ assert.Equal(t, teIdentifier.NodeExecutionId.GetExecutionId().GetName(), teIdentifierCopy.NodeExecutionId.GetExecutionId().GetName())
+ assert.Equal(t, teIdentifier.NodeExecutionId.GetExecutionId().GetOrg(), teIdentifierCopy.NodeExecutionId.GetExecutionId().GetOrg())
+ assert.Equal(t, teIdentifier.NodeExecutionId.GetNodeId(), teIdentifierCopy.NodeExecutionId.GetNodeId())
assert.Equal(t, teIdentifier.RetryAttempt, teIdentifierCopy.RetryAttempt)
}
diff --git a/flytepropeller/pkg/compiler/admin.go b/flytepropeller/pkg/compiler/admin.go
index cceab67f67..94646c95d2 100644
--- a/flytepropeller/pkg/compiler/admin.go
+++ b/flytepropeller/pkg/compiler/admin.go
@@ -29,8 +29,8 @@ func (p *LaunchPlanInterfaceProvider) GetExpectedOutputs() *core.VariableMap {
func NewLaunchPlanInterfaceProvider(launchPlan *admin.LaunchPlan) *LaunchPlanInterfaceProvider {
return &LaunchPlanInterfaceProvider{
- expectedInputs: launchPlan.Closure.ExpectedInputs,
- expectedOutputs: launchPlan.Closure.ExpectedOutputs,
- identifier: launchPlan.Id,
+ expectedInputs: launchPlan.GetClosure().GetExpectedInputs(),
+ expectedOutputs: launchPlan.GetClosure().GetExpectedOutputs(),
+ identifier: launchPlan.GetId(),
}
}
diff --git a/flytepropeller/pkg/compiler/admin_test.go b/flytepropeller/pkg/compiler/admin_test.go
index 8e5447bfb1..a4a0891d51 100644
--- a/flytepropeller/pkg/compiler/admin_test.go
+++ b/flytepropeller/pkg/compiler/admin_test.go
@@ -59,15 +59,15 @@ func TestGetId(t *testing.T) {
func TestGetExpectedInputs(t *testing.T) {
launchPlan := getDummyLaunchPlan()
provider := NewLaunchPlanInterfaceProvider(launchPlan)
- assert.Contains(t, (*provider.GetExpectedInputs()).Parameters, "foo")
- assert.NotNil(t, (*provider.GetExpectedInputs()).Parameters["foo"].Var.Type.GetSimple())
- assert.EqualValues(t, "STRING", (*provider.GetExpectedInputs()).Parameters["foo"].Var.Type.GetSimple().String())
- assert.NotNil(t, (*provider.GetExpectedInputs()).Parameters["foo"].GetDefault())
+ assert.Contains(t, (*provider.GetExpectedInputs()).GetParameters(), "foo")
+ assert.NotNil(t, (*provider.GetExpectedInputs()).GetParameters()["foo"].GetVar().GetType().GetSimple())
+ assert.EqualValues(t, "STRING", (*provider.GetExpectedInputs()).GetParameters()["foo"].GetVar().GetType().GetSimple().String())
+ assert.NotNil(t, (*provider.GetExpectedInputs()).GetParameters()["foo"].GetDefault())
}
func TestGetExpectedOutputs(t *testing.T) {
launchPlan := getDummyLaunchPlan()
provider := NewLaunchPlanInterfaceProvider(launchPlan)
- assert.EqualValues(t, outputs.Variables["foo"].GetType().GetType(),
- provider.GetExpectedOutputs().Variables["foo"].GetType().GetType())
+ assert.EqualValues(t, outputs.GetVariables()["foo"].GetType().GetType(),
+ provider.GetExpectedOutputs().GetVariables()["foo"].GetType().GetType())
}
diff --git a/flytepropeller/pkg/compiler/builders.go b/flytepropeller/pkg/compiler/builders.go
index 908a4b91cb..6e875abc40 100644
--- a/flytepropeller/pkg/compiler/builders.go
+++ b/flytepropeller/pkg/compiler/builders.go
@@ -32,8 +32,8 @@ type workflowBuilder struct {
}
func (w workflowBuilder) GetFailureNode() c.Node {
- if w.GetCoreWorkflow() != nil && w.GetCoreWorkflow().GetTemplate() != nil && w.GetCoreWorkflow().GetTemplate().FailureNode != nil {
- return w.GetOrCreateNodeBuilder(w.GetCoreWorkflow().GetTemplate().FailureNode)
+ if w.GetCoreWorkflow() != nil && w.GetCoreWorkflow().GetTemplate() != nil && w.GetCoreWorkflow().GetTemplate().GetFailureNode() != nil {
+ return w.GetOrCreateNodeBuilder(w.GetCoreWorkflow().GetTemplate().GetFailureNode())
}
return nil
@@ -152,8 +152,8 @@ func (t taskBuilder) GetCoreTask() *core.TaskTemplate {
}
func (t taskBuilder) GetID() c.Identifier {
- if t.flyteTask.Id != nil {
- return t.flyteTask.Id
+ if t.flyteTask.GetId() != nil {
+ return t.flyteTask.GetId()
}
return &core.Identifier{}
diff --git a/flytepropeller/pkg/compiler/common/id_set.go b/flytepropeller/pkg/compiler/common/id_set.go
index eb118fae64..a5cf98dd8d 100644
--- a/flytepropeller/pkg/compiler/common/id_set.go
+++ b/flytepropeller/pkg/compiler/common/id_set.go
@@ -62,24 +62,24 @@ type sortableSliceOfString []Identifier
func (s sortableSliceOfString) Len() int { return len(s) }
func (s sortableSliceOfString) Less(i, j int) bool {
first, second := s[i], s[j]
- if first.ResourceType != second.ResourceType {
- return first.ResourceType < second.ResourceType
+ if first.GetResourceType() != second.GetResourceType() {
+ return first.GetResourceType() < second.GetResourceType()
}
- if first.Project != second.Project {
- return first.Project < second.Project
+ if first.GetProject() != second.GetProject() {
+ return first.GetProject() < second.GetProject()
}
- if first.Domain != second.Domain {
- return first.Domain < second.Domain
+ if first.GetDomain() != second.GetDomain() {
+ return first.GetDomain() < second.GetDomain()
}
- if first.Name != second.Name {
- return first.Name < second.Name
+ if first.GetName() != second.GetName() {
+ return first.GetName() < second.GetName()
}
- if first.Version != second.Version {
- return first.Version < second.Version
+ if first.GetVersion() != second.GetVersion() {
+ return first.GetVersion() < second.GetVersion()
}
return false
diff --git a/flytepropeller/pkg/compiler/common/index.go b/flytepropeller/pkg/compiler/common/index.go
index 365a3356c1..d244103e35 100644
--- a/flytepropeller/pkg/compiler/common/index.go
+++ b/flytepropeller/pkg/compiler/common/index.go
@@ -55,16 +55,16 @@ func NewWorkflowIndex(workflows []*core.CompiledWorkflow, errs errors.CompileErr
ok = true
index = make(WorkflowIndex, len(workflows))
for _, wf := range workflows {
- if wf.Template.Id == nil {
+ if wf.GetTemplate().GetId() == nil {
// TODO: Log/Return error
return nil, false
}
- if _, found := index[wf.Template.Id.String()]; found {
- errs.Collect(errors.NewDuplicateIDFoundErr(wf.Template.Id.String()))
+ if _, found := index[wf.GetTemplate().GetId().String()]; found {
+ errs.Collect(errors.NewDuplicateIDFoundErr(wf.GetTemplate().GetId().String()))
ok = false
} else {
- index[wf.Template.Id.String()] = wf
+ index[wf.GetTemplate().GetId().String()] = wf
}
}
diff --git a/flytepropeller/pkg/compiler/requirements.go b/flytepropeller/pkg/compiler/requirements.go
index b3b01823a6..69265b64a1 100644
--- a/flytepropeller/pkg/compiler/requirements.go
+++ b/flytepropeller/pkg/compiler/requirements.go
@@ -57,11 +57,11 @@ func getRequirements(fg *core.WorkflowTemplate, subWfs common.WorkflowIndex, fol
func updateWorkflowRequirements(workflow *core.WorkflowTemplate, subWfs common.WorkflowIndex,
taskIds, workflowIds common.IdentifierSet, followSubworkflows bool, errs errors.CompileErrors) {
- for _, node := range workflow.Nodes {
+ for _, node := range workflow.GetNodes() {
updateNodeRequirements(node, subWfs, taskIds, workflowIds, followSubworkflows, errs)
}
- if workflow.FailureNode != nil {
- updateNodeRequirements(workflow.FailureNode, subWfs, taskIds, workflowIds, followSubworkflows, errs)
+ if workflow.GetFailureNode() != nil {
+ updateNodeRequirements(workflow.GetFailureNode(), subWfs, taskIds, workflowIds, followSubworkflows, errs)
}
}
@@ -75,21 +75,21 @@ func updateNodeRequirements(node *flyteNode, subWfs common.WorkflowIndex, taskId
workflowIds.Insert(workflowNode.GetLaunchplanRef())
} else if workflowNode.GetSubWorkflowRef() != nil && followSubworkflows {
if subWf, found := subWfs[workflowNode.GetSubWorkflowRef().String()]; !found {
- errs.Collect(errors.NewWorkflowReferenceNotFoundErr(node.Id, workflowNode.GetSubWorkflowRef().String()))
+ errs.Collect(errors.NewWorkflowReferenceNotFoundErr(node.GetId(), workflowNode.GetSubWorkflowRef().String()))
} else {
- updateWorkflowRequirements(subWf.Template, subWfs, taskIds, workflowIds, followSubworkflows, errs)
+ updateWorkflowRequirements(subWf.GetTemplate(), subWfs, taskIds, workflowIds, followSubworkflows, errs)
}
}
} else if branchN := node.GetBranchNode(); branchN != nil {
- updateNodeRequirements(branchN.IfElse.Case.ThenNode, subWfs, taskIds, workflowIds, followSubworkflows, errs)
- for _, otherCase := range branchN.IfElse.Other {
- updateNodeRequirements(otherCase.ThenNode, subWfs, taskIds, workflowIds, followSubworkflows, errs)
+ updateNodeRequirements(branchN.GetIfElse().GetCase().GetThenNode(), subWfs, taskIds, workflowIds, followSubworkflows, errs)
+ for _, otherCase := range branchN.GetIfElse().GetOther() {
+ updateNodeRequirements(otherCase.GetThenNode(), subWfs, taskIds, workflowIds, followSubworkflows, errs)
}
- if elseNode := branchN.IfElse.GetElseNode(); elseNode != nil {
+ if elseNode := branchN.GetIfElse().GetElseNode(); elseNode != nil {
updateNodeRequirements(elseNode, subWfs, taskIds, workflowIds, followSubworkflows, errs)
}
} else if arrayNode := node.GetArrayNode(); arrayNode != nil {
- updateNodeRequirements(arrayNode.Node, subWfs, taskIds, workflowIds, followSubworkflows, errs)
+ updateNodeRequirements(arrayNode.GetNode(), subWfs, taskIds, workflowIds, followSubworkflows, errs)
}
}
diff --git a/flytepropeller/pkg/compiler/task_compiler.go b/flytepropeller/pkg/compiler/task_compiler.go
index 4d8fea46db..ea6e4efef2 100644
--- a/flytepropeller/pkg/compiler/task_compiler.go
+++ b/flytepropeller/pkg/compiler/task_compiler.go
@@ -23,25 +23,25 @@ func validateResource(resourceName core.Resources_ResourceName, resourceVal stri
func validateKnownResources(resources []*core.Resources_ResourceEntry, errs errors.CompileErrors) {
for _, r := range resources {
- validateResource(r.Name, r.Value, errs.NewScope())
+ validateResource(r.GetName(), r.GetValue(), errs.NewScope())
}
}
func validateResources(resources *core.Resources, errs errors.CompileErrors) (ok bool) {
// Validate known resource keys.
- validateKnownResources(resources.Requests, errs.NewScope())
- validateKnownResources(resources.Limits, errs.NewScope())
+ validateKnownResources(resources.GetRequests(), errs.NewScope())
+ validateKnownResources(resources.GetLimits(), errs.NewScope())
return !errs.HasErrors()
}
func validateContainerCommand(task *core.TaskTemplate, errs errors.CompileErrors) (ok bool) {
- if task.Interface == nil {
+ if task.GetInterface() == nil {
// Nothing to validate.
return
}
- hasInputs := task.Interface.Inputs != nil && len(task.Interface.GetInputs().Variables) > 0
- hasOutputs := task.Interface.Outputs != nil && len(task.Interface.GetOutputs().Variables) > 0
+ hasInputs := task.GetInterface().GetInputs() != nil && len(task.GetInterface().GetInputs().GetVariables()) > 0
+ hasOutputs := task.GetInterface().GetOutputs() != nil && len(task.GetInterface().GetOutputs().GetVariables()) > 0
if !(hasInputs || hasOutputs) {
// Nothing to validate.
return
@@ -63,12 +63,12 @@ func validateContainer(task *core.TaskTemplate, errs errors.CompileErrors) (ok b
validateContainerCommand(task, errs)
container := task.GetContainer()
- if container.Image == "" {
+ if container.GetImage() == "" {
errs.Collect(errors.NewValueRequiredErr("container", "image"))
}
- if container.Resources != nil {
- validateResources(container.Resources, errs.NewScope())
+ if container.GetResources() != nil {
+ validateResources(container.GetResources(), errs.NewScope())
}
return !errs.HasErrors()
@@ -80,7 +80,7 @@ func validateK8sPod(task *core.TaskTemplate, errs errors.CompileErrors) (ok bool
return
}
var podSpec v1.PodSpec
- if err := utils.UnmarshalStructToObj(task.GetK8SPod().PodSpec, &podSpec); err != nil {
+ if err := utils.UnmarshalStructToObj(task.GetK8SPod().GetPodSpec(), &podSpec); err != nil {
errs.Collect(errors.NewInvalidValueErr("root", "k8s pod spec"))
return
}
@@ -93,7 +93,7 @@ func validateK8sPod(task *core.TaskTemplate, errs errors.CompileErrors) (ok bool
}
func compileTaskInternal(task *core.TaskTemplate, errs errors.CompileErrors) common.Task {
- if task.Id == nil {
+ if task.GetId() == nil {
errs.Collect(errors.NewValueRequiredErr("root", "Id"))
}
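
The test-file changes that follow also migrate off the deprecated io/ioutil package: ioutil.ReadFile and ioutil.WriteFile are replaced by os.ReadFile and os.WriteFile, which have had identical signatures since Go 1.16. A small sketch of the replacement pattern (the file name here is only illustrative):

package main

import (
	"log"
	"os"
)

func main() {
	// os.WriteFile / os.ReadFile are drop-in replacements for the
	// deprecated ioutil equivalents.
	if err := os.WriteFile("example.txt", []byte("hello"), 0o600); err != nil {
		log.Fatal(err)
	}
	raw, err := os.ReadFile("example.txt")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(raw))
}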
diff --git a/flytepropeller/pkg/compiler/test/compiler_test.go b/flytepropeller/pkg/compiler/test/compiler_test.go
index 355fc4a15b..a6925dc3de 100644
--- a/flytepropeller/pkg/compiler/test/compiler_test.go
+++ b/flytepropeller/pkg/compiler/test/compiler_test.go
@@ -3,7 +3,6 @@ package test
import (
"encoding/json"
"flag"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -36,27 +35,27 @@ func makeDefaultInputs(iface *core.TypedInterface) *core.LiteralMap {
return nil
}
- res := make(map[string]*core.Literal, len(iface.GetInputs().Variables))
- for inputName, inputVar := range iface.GetInputs().Variables {
+ res := make(map[string]*core.Literal, len(iface.GetInputs().GetVariables()))
+ for inputName, inputVar := range iface.GetInputs().GetVariables() {
// A workaround because the coreutils don't support the "StructuredDataSet" type
- if reflect.TypeOf(inputVar.Type.Type) == reflect.TypeOf(&core.LiteralType_StructuredDatasetType{}) {
+ if reflect.TypeOf(inputVar.GetType().GetType()) == reflect.TypeOf(&core.LiteralType_StructuredDatasetType{}) {
res[inputName] = &core.Literal{
Value: &core.Literal_Scalar{
Scalar: &core.Scalar{
Value: &core.Scalar_StructuredDataset{
StructuredDataset: &core.StructuredDataset{
Metadata: &core.StructuredDatasetMetadata{
- StructuredDatasetType: inputVar.Type.Type.(*core.LiteralType_StructuredDatasetType).StructuredDatasetType,
+ StructuredDatasetType: inputVar.GetType().GetType().(*core.LiteralType_StructuredDatasetType).StructuredDatasetType,
},
},
},
},
},
}
- } else if reflect.TypeOf(inputVar.Type.Type) == reflect.TypeOf(&core.LiteralType_Simple{}) && inputVar.Type.GetSimple() == core.SimpleType_DATETIME {
+ } else if reflect.TypeOf(inputVar.GetType().GetType()) == reflect.TypeOf(&core.LiteralType_Simple{}) && inputVar.GetType().GetSimple() == core.SimpleType_DATETIME {
res[inputName] = coreutils.MustMakeLiteral(time.UnixMicro(10))
} else {
- res[inputName] = coreutils.MustMakeDefaultLiteralForType(inputVar.Type)
+ res[inputName] = coreutils.MustMakeDefaultLiteralForType(inputVar.GetType())
}
}
@@ -114,7 +113,7 @@ func TestDynamic(t *testing.T) {
// t.SkipNow()
//}
- raw, err := ioutil.ReadFile(path)
+ raw, err := os.ReadFile(path)
assert.NoError(t, err)
wf := &core.DynamicJobSpec{}
err = utils.UnmarshalBytesToPb(raw, wf)
@@ -123,7 +122,7 @@ func TestDynamic(t *testing.T) {
}
t.Log("Compiling Workflow")
- compiledTasks := mustCompileTasks(t, wf.Tasks)
+ compiledTasks := mustCompileTasks(t, wf.GetTasks())
wfTemplate := &core.WorkflowTemplate{
Id: &core.Identifier{
Domain: "domain",
@@ -146,16 +145,16 @@ func TestDynamic(t *testing.T) {
},
}},
},
- Nodes: wf.Nodes,
- Outputs: wf.Outputs,
+ Nodes: wf.GetNodes(),
+ Outputs: wf.GetOutputs(),
}
- compiledWfc, err := compiler.CompileWorkflow(wfTemplate, wf.Subworkflows, compiledTasks,
+ compiledWfc, err := compiler.CompileWorkflow(wfTemplate, wf.GetSubworkflows(), compiledTasks,
[]common.InterfaceProvider{})
if !assert.NoError(t, err) {
t.FailNow()
}
- inputs := makeDefaultInputs(compiledWfc.Primary.Template.Interface)
+ inputs := makeDefaultInputs(compiledWfc.GetPrimary().GetTemplate().GetInterface())
flyteWf, err := k8s.BuildFlyteWorkflow(compiledWfc,
inputs,
@@ -180,22 +179,22 @@ func TestDynamic(t *testing.T) {
func getAllSubNodeIDs(n *core.Node) sets.String {
res := sets.NewString()
if branchNode := n.GetBranchNode(); branchNode != nil {
- thenNode := branchNode.IfElse.Case.ThenNode
+ thenNode := branchNode.GetIfElse().GetCase().GetThenNode()
if hasPromiseInputs(thenNode.GetInputs()) {
res.Insert(thenNode.GetId())
}
res = res.Union(getAllSubNodeIDs(thenNode))
- for _, other := range branchNode.IfElse.Other {
- if hasPromiseInputs(other.ThenNode.GetInputs()) {
- res.Insert(other.ThenNode.GetId())
+ for _, other := range branchNode.GetIfElse().GetOther() {
+ if hasPromiseInputs(other.GetThenNode().GetInputs()) {
+ res.Insert(other.GetThenNode().GetId())
}
- res = res.Union(getAllSubNodeIDs(other.ThenNode))
+ res = res.Union(getAllSubNodeIDs(other.GetThenNode()))
}
- if elseNode := branchNode.IfElse.GetElseNode(); elseNode != nil {
+ if elseNode := branchNode.GetIfElse().GetElseNode(); elseNode != nil {
if hasPromiseInputs(elseNode.GetInputs()) {
res.Insert(elseNode.GetId())
}
@@ -221,7 +220,7 @@ var allNodesPredicate = func(n *core.Node) bool {
func getAllMatchingNodes(wf *core.CompiledWorkflow, predicate nodePredicate) sets.String {
s := sets.NewString()
- for _, n := range wf.Template.Nodes {
+ for _, n := range wf.GetTemplate().GetNodes() {
if predicate(n) {
s.Insert(n.GetId())
}
@@ -235,13 +234,13 @@ func getAllMatchingNodes(wf *core.CompiledWorkflow, predicate nodePredicate) set
func bindingHasPromiseInputs(binding *core.BindingData) bool {
switch v := binding.GetValue().(type) {
case *core.BindingData_Collection:
- for _, d := range v.Collection.Bindings {
+ for _, d := range v.Collection.GetBindings() {
if bindingHasPromiseInputs(d) {
return true
}
}
case *core.BindingData_Map:
- for _, d := range v.Map.Bindings {
+ for _, d := range v.Map.GetBindings() {
if bindingHasPromiseInputs(d) {
return true
}
@@ -255,7 +254,7 @@ func bindingHasPromiseInputs(binding *core.BindingData) bool {
func hasPromiseInputs(bindings []*core.Binding) bool {
for _, b := range bindings {
- if bindingHasPromiseInputs(b.Binding) {
+ if bindingHasPromiseInputs(b.GetBinding()) {
return true
}
}
@@ -265,14 +264,14 @@ func hasPromiseInputs(bindings []*core.Binding) bool {
func assertNodeIDsInConnections(t testing.TB, nodeIDsWithDeps, allNodeIDs sets.String, connections *core.ConnectionSet) bool {
actualNodeIDs := sets.NewString()
- for id, lst := range connections.Downstream {
+ for id, lst := range connections.GetDownstream() {
actualNodeIDs.Insert(id)
- actualNodeIDs.Insert(lst.Ids...)
+ actualNodeIDs.Insert(lst.GetIds()...)
}
- for id, lst := range connections.Upstream {
+ for id, lst := range connections.GetUpstream() {
actualNodeIDs.Insert(id)
- actualNodeIDs.Insert(lst.Ids...)
+ actualNodeIDs.Insert(lst.GetIds()...)
}
notFoundInConnections := nodeIDsWithDeps.Difference(actualNodeIDs)
@@ -305,13 +304,13 @@ func storeOrDiff(t testing.TB, f func(obj any) ([]byte, error), obj any, path st
}
if *update {
- err = ioutil.WriteFile(path, raw, os.ModePerm)
+ err = os.WriteFile(path, raw, os.ModePerm) // #nosec G306
if !assert.NoError(t, err) {
return false
}
} else {
- goldenRaw, err := ioutil.ReadFile(path)
+ goldenRaw, err := os.ReadFile(path)
if !assert.NoError(t, err) {
return false
}
@@ -339,7 +338,7 @@ func runCompileTest(t *testing.T, dirName string) {
}
for _, p := range paths {
- raw, err := ioutil.ReadFile(p)
+ raw, err := os.ReadFile(p)
assert.NoError(t, err)
tsk := &admin.TaskSpec{}
err = proto.Unmarshal(raw, tsk)
@@ -349,13 +348,13 @@ func runCompileTest(t *testing.T, dirName string) {
}
t.Run(p, func(t *testing.T) {
- inputTask := tsk.Template
+ inputTask := tsk.GetTemplate()
setDefaultFields(inputTask)
task, err := compiler.CompileTask(inputTask)
if !assert.NoError(t, err) {
t.FailNow()
}
- compiledTasks[tsk.Template.Id.String()] = task
+ compiledTasks[tsk.GetTemplate().GetId().String()] = task
// unmarshal from json file to compare rather than yaml
taskFile := filepath.Join(filepath.Dir(p), "compiled", strings.TrimRight(filepath.Base(p), filepath.Ext(p))+"_task.json")
@@ -387,7 +386,7 @@ func runCompileTest(t *testing.T, dirName string) {
}
t.Run(p, func(t *testing.T) {
- inputWf := wf.Workflow
+ inputWf := wf.GetWorkflow()
reqs, err := compiler.GetRequirements(inputWf, nil)
if !assert.NoError(t, err) {
@@ -411,9 +410,9 @@ func runCompileTest(t *testing.T, dirName string) {
t.FailNow()
}
- allNodeIDs := getAllMatchingNodes(compiledWfc.Primary, allNodesPredicate)
- nodeIDsWithDeps := getAllMatchingNodes(compiledWfc.Primary, hasPromiseNodePredicate)
- if !assertNodeIDsInConnections(t, nodeIDsWithDeps, allNodeIDs, compiledWfc.Primary.Connections) {
+ allNodeIDs := getAllMatchingNodes(compiledWfc.GetPrimary(), allNodesPredicate)
+ nodeIDsWithDeps := getAllMatchingNodes(compiledWfc.GetPrimary(), hasPromiseNodePredicate)
+ if !assertNodeIDsInConnections(t, nodeIDsWithDeps, allNodeIDs, compiledWfc.GetPrimary().GetConnections()) {
t.FailNow()
}
@@ -433,7 +432,7 @@ func runCompileTest(t *testing.T, dirName string) {
for _, p := range paths {
t.Run(p, func(t *testing.T) {
- raw, err := ioutil.ReadFile(p)
+ raw, err := os.ReadFile(p)
if !assert.NoError(t, err) {
t.FailNow()
}
@@ -443,9 +442,9 @@ func runCompileTest(t *testing.T, dirName string) {
t.FailNow()
}
- inputs := makeDefaultInputs(compiledWfc.Primary.Template.Interface)
+ inputs := makeDefaultInputs(compiledWfc.GetPrimary().GetTemplate().GetInterface())
- dotFormat := visualize.ToGraphViz(compiledWfc.Primary)
+ dotFormat := visualize.ToGraphViz(compiledWfc.GetPrimary())
t.Logf("GraphViz Dot: %v\n", dotFormat)
flyteWf, err := k8s.BuildFlyteWorkflow(compiledWfc,
diff --git a/flytepropeller/pkg/compiler/transformers/k8s/inputs.go b/flytepropeller/pkg/compiler/transformers/k8s/inputs.go
index 2b94570c20..6d7572e9f5 100644
--- a/flytepropeller/pkg/compiler/transformers/k8s/inputs.go
+++ b/flytepropeller/pkg/compiler/transformers/k8s/inputs.go
@@ -15,20 +15,20 @@ func validateInputs(nodeID common.NodeID, iface *core.TypedInterface, inputs cor
return false
}
- if iface.Inputs == nil {
+ if iface.GetInputs() == nil {
errs.Collect(errors.NewValueRequiredErr(nodeID, "interface.InputsRef"))
return false
}
- varMap := make(map[string]*core.Variable, len(iface.Inputs.Variables))
+ varMap := make(map[string]*core.Variable, len(iface.GetInputs().GetVariables()))
requiredInputsSet := sets.String{}
- for name, v := range iface.Inputs.Variables {
+ for name, v := range iface.GetInputs().GetVariables() {
varMap[name] = v
requiredInputsSet.Insert(name)
}
boundInputsSet := sets.String{}
- for inputVar, inputVal := range inputs.Literals {
+ for inputVar, inputVal := range inputs.GetLiterals() {
v, exists := varMap[inputVar]
if !exists {
errs.Collect(errors.NewVariableNameNotFoundErr(nodeID, "", inputVar))
@@ -41,8 +41,8 @@ func validateInputs(nodeID common.NodeID, iface *core.TypedInterface, inputs cor
errs.Collect(errors.NewInvalidLiteralTypeErr(nodeID, inputVar, err))
continue
}
- if !validators.AreTypesCastable(inputType, v.Type) {
- errs.Collect(errors.NewMismatchingTypesErr(nodeID, inputVar, common.LiteralTypeToStr(v.Type), common.LiteralTypeToStr(inputType)))
+ if !validators.AreTypesCastable(inputType, v.GetType()) {
+ errs.Collect(errors.NewMismatchingTypesErr(nodeID, inputVar, common.LiteralTypeToStr(v.GetType()), common.LiteralTypeToStr(inputType)))
continue
}
diff --git a/flytepropeller/pkg/compiler/transformers/k8s/node.go b/flytepropeller/pkg/compiler/transformers/k8s/node.go
index 8a4c9248ec..18ec1ba02f 100644
--- a/flytepropeller/pkg/compiler/transformers/k8s/node.go
+++ b/flytepropeller/pkg/compiler/transformers/k8s/node.go
@@ -35,8 +35,8 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile
taskID := n.GetTaskNode().GetReferenceId().String()
// TODO: Use task index for quick lookup
for _, t := range tasks {
- if t.Template.Id.String() == taskID {
- task = t.Template
+ if t.GetTemplate().GetId().String() == taskID {
+ task = t.GetTemplate()
break
}
}
@@ -46,7 +46,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile
return nil, !errs.HasErrors()
}
- if overrides := n.GetTaskNode().Overrides; overrides != nil {
+ if overrides := n.GetTaskNode().GetOverrides(); overrides != nil {
if overrides.GetResources() != nil {
resources = overrides.GetResources()
}
@@ -87,7 +87,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile
interruptVal := n.GetMetadata().GetInterruptible()
interruptible = &interruptVal
}
- name = n.GetMetadata().Name
+ name = n.GetMetadata().GetName()
}
nodeSpec := &v1alpha1.NodeSpec{
@@ -114,7 +114,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile
return nil, !errs.HasErrors()
}
- switch n.GetWorkflowNode().Reference.(type) {
+ switch n.GetWorkflowNode().GetReference().(type) {
case *core.WorkflowNode_LaunchplanRef:
nodeSpec.Kind = v1alpha1.NodeKindWorkflow
nodeSpec.WorkflowNode = &v1alpha1.WorkflowNodeSpec{
@@ -146,7 +146,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile
case *core.Node_GateNode:
nodeSpec.Kind = v1alpha1.NodeKindGate
gateNode := n.GetGateNode()
- switch gateNode.Condition.(type) {
+ switch gateNode.GetCondition().(type) {
case *core.GateNode_Approve:
nodeSpec.GateNode = &v1alpha1.GateNodeSpec{
Kind: v1alpha1.ConditionKindApprove,
@@ -173,7 +173,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile
arrayNode := n.GetArrayNode()
// build subNodeSpecs
- subNodeSpecs, ok := buildNodeSpec(arrayNode.Node, tasks, errs)
+ subNodeSpecs, ok := buildNodeSpec(arrayNode.GetNode(), tasks, errs)
if !ok {
return nil, ok
}
@@ -191,7 +191,7 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile
Parallelism: parallelism,
}
- switch successCriteria := arrayNode.SuccessCriteria.(type) {
+ switch successCriteria := arrayNode.GetSuccessCriteria().(type) {
case *core.ArrayNode_MinSuccesses:
nodeSpec.ArrayNode.MinSuccesses = &successCriteria.MinSuccesses
case *core.ArrayNode_MinSuccessRatio:
@@ -209,13 +209,13 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile
}
func buildIfBlockSpec(block *core.IfBlock, tasks []*core.CompiledTask, errs errors.CompileErrors) (*v1alpha1.IfBlock, []*v1alpha1.NodeSpec) {
- nodeSpecs, ok := buildNodeSpec(block.ThenNode, tasks, errs)
+ nodeSpecs, ok := buildNodeSpec(block.GetThenNode(), tasks, errs)
if !ok {
return nil, []*v1alpha1.NodeSpec{}
}
return &v1alpha1.IfBlock{
- Condition: v1alpha1.BooleanExpression{BooleanExpression: block.Condition},
- ThenNode: refStr(block.ThenNode.Id),
+ Condition: v1alpha1.BooleanExpression{BooleanExpression: block.GetCondition()},
+ ThenNode: refStr(block.GetThenNode().GetId()),
}, nodeSpecs
}
@@ -226,26 +226,26 @@ func buildBranchNodeSpec(branch *core.BranchNode, tasks []*core.CompiledTask, er
var childNodes []*v1alpha1.NodeSpec
- branchNode, nodeSpecs := buildIfBlockSpec(branch.IfElse.Case, tasks, errs.NewScope())
+ branchNode, nodeSpecs := buildIfBlockSpec(branch.GetIfElse().GetCase(), tasks, errs.NewScope())
res := &v1alpha1.BranchNodeSpec{
If: *branchNode,
}
childNodes = append(childNodes, nodeSpecs...)
- switch branch.IfElse.GetDefault().(type) {
+ switch branch.GetIfElse().GetDefault().(type) {
case *core.IfElseBlock_ElseNode:
- ns, ok := buildNodeSpec(branch.IfElse.GetElseNode(), tasks, errs)
+ ns, ok := buildNodeSpec(branch.GetIfElse().GetElseNode(), tasks, errs)
if !ok {
return nil, []*v1alpha1.NodeSpec{}
}
childNodes = append(childNodes, ns...)
- res.Else = refStr(branch.IfElse.GetElseNode().Id)
+ res.Else = refStr(branch.GetIfElse().GetElseNode().GetId())
case *core.IfElseBlock_Error:
- res.ElseFail = branch.IfElse.GetError()
+ res.ElseFail = branch.GetIfElse().GetError()
}
- other := make([]*v1alpha1.IfBlock, 0, len(branch.IfElse.Other))
- for _, block := range branch.IfElse.Other {
+ other := make([]*v1alpha1.IfBlock, 0, len(branch.GetIfElse().GetOther()))
+ for _, block := range branch.GetIfElse().GetOther() {
b, ns := buildIfBlockSpec(block, tasks, errs.NewScope())
other = append(other, b)
childNodes = append(childNodes, ns...)
@@ -285,12 +285,12 @@ func buildTasks(tasks []*core.CompiledTask, errs errors.CompileErrors) map[commo
if flyteTask == nil {
errs.Collect(errors.NewValueRequiredErr("root", "coreTask"))
} else {
- taskID := flyteTask.Template.Id.String()
+ taskID := flyteTask.GetTemplate().GetId().String()
if _, exists := res[taskID]; exists {
errs.Collect(errors.NewValueCollisionError(taskID, "Id", taskID))
}
- res[taskID] = &v1alpha1.TaskSpec{TaskTemplate: flyteTask.Template}
+ res[taskID] = &v1alpha1.TaskSpec{TaskTemplate: flyteTask.GetTemplate()}
}
}
diff --git a/flytepropeller/pkg/compiler/transformers/k8s/node_test.go b/flytepropeller/pkg/compiler/transformers/k8s/node_test.go
index c6a08b5991..28fbb2bf55 100644
--- a/flytepropeller/pkg/compiler/transformers/k8s/node_test.go
+++ b/flytepropeller/pkg/compiler/transformers/k8s/node_test.go
@@ -175,7 +175,7 @@ func TestBuildNodeSpec(t *testing.T) {
n.Node.Target = &core.Node_WorkflowNode{
WorkflowNode: &core.WorkflowNode{
Reference: &core.WorkflowNode_SubWorkflowRef{
- SubWorkflowRef: n.subWF.GetCoreWorkflow().Template.Id,
+ SubWorkflowRef: n.subWF.GetCoreWorkflow().GetTemplate().GetId(),
},
},
}
@@ -394,15 +394,15 @@ func TestBuildTasks(t *testing.T) {
taskMap := buildTasks(tasks, errs)
annInputTask := taskMap[(&core.Identifier{Name: "annotatedInput"}).String()]
- assert.Nil(t, annInputTask.Interface.Inputs.Variables["a"].Type.Annotation)
+ assert.Nil(t, annInputTask.Interface.GetInputs().GetVariables()["a"].GetType().GetAnnotation())
unAnnInputTask := taskMap[(&core.Identifier{Name: "unannotatedInput"}).String()]
- assert.Nil(t, unAnnInputTask.Interface.Inputs.Variables["a"].Type.Annotation)
+ assert.Nil(t, unAnnInputTask.Interface.GetInputs().GetVariables()["a"].GetType().GetAnnotation())
annOutputTask := taskMap[(&core.Identifier{Name: "annotatedOutput"}).String()]
- assert.Nil(t, annOutputTask.Interface.Outputs.Variables["a"].Type.Annotation)
+ assert.Nil(t, annOutputTask.Interface.GetOutputs().GetVariables()["a"].GetType().GetAnnotation())
unAnnOutputTask := taskMap[(&core.Identifier{Name: "unannotatedOutput"}).String()]
- assert.Nil(t, unAnnOutputTask.Interface.Outputs.Variables["a"].Type.Annotation)
+ assert.Nil(t, unAnnOutputTask.Interface.GetOutputs().GetVariables()["a"].GetType().GetAnnotation())
})
}
diff --git a/flytepropeller/pkg/compiler/transformers/k8s/utils.go b/flytepropeller/pkg/compiler/transformers/k8s/utils.go
index 06884f4b75..bd08be3a9a 100644
--- a/flytepropeller/pkg/compiler/transformers/k8s/utils.go
+++ b/flytepropeller/pkg/compiler/transformers/k8s/utils.go
@@ -19,15 +19,15 @@ func refStr(s string) *string {
}
func computeRetryStrategy(n *core.Node, t *core.TaskTemplate) *v1alpha1.RetryStrategy {
- if n.GetMetadata() != nil && n.GetMetadata().GetRetries() != nil && n.GetMetadata().GetRetries().Retries != 0 {
+ if n.GetMetadata() != nil && n.GetMetadata().GetRetries() != nil && n.GetMetadata().GetRetries().GetRetries() != 0 {
return &v1alpha1.RetryStrategy{
- MinAttempts: refInt(int(n.GetMetadata().GetRetries().Retries + 1)),
+ MinAttempts: refInt(int(n.GetMetadata().GetRetries().GetRetries() + 1)),
}
}
- if t != nil && t.GetMetadata() != nil && t.GetMetadata().GetRetries() != nil && t.GetMetadata().GetRetries().Retries != 0 {
+ if t != nil && t.GetMetadata() != nil && t.GetMetadata().GetRetries() != nil && t.GetMetadata().GetRetries().GetRetries() != 0 {
return &v1alpha1.RetryStrategy{
- MinAttempts: refInt(int(t.GetMetadata().GetRetries().Retries + 1)),
+ MinAttempts: refInt(int(t.GetMetadata().GetRetries().GetRetries() + 1)),
}
}
@@ -91,10 +91,10 @@ func StripTypeMetadata(t *core.LiteralType) *core.LiteralType {
// interface of the dynamically compiled workflow. `Structure` is used to extend type checking information on
// different Flyte types and is therefore required to ensure correct type validation.
- switch underlyingType := c.Type.(type) {
+ switch underlyingType := c.GetType().(type) {
case *core.LiteralType_UnionType:
- variants := make([]*core.LiteralType, 0, len(c.GetUnionType().Variants))
- for _, variant := range c.GetUnionType().Variants {
+ variants := make([]*core.LiteralType, 0, len(c.GetUnionType().GetVariants()))
+ for _, variant := range c.GetUnionType().GetVariants() {
variants = append(variants, StripTypeMetadata(variant))
}
@@ -104,11 +104,11 @@ func StripTypeMetadata(t *core.LiteralType) *core.LiteralType {
case *core.LiteralType_CollectionType:
underlyingType.CollectionType = StripTypeMetadata(c.GetCollectionType())
case *core.LiteralType_StructuredDatasetType:
- columns := make([]*core.StructuredDatasetType_DatasetColumn, 0, len(c.GetStructuredDatasetType().Columns))
- for _, column := range c.GetStructuredDatasetType().Columns {
+ columns := make([]*core.StructuredDatasetType_DatasetColumn, 0, len(c.GetStructuredDatasetType().GetColumns()))
+ for _, column := range c.GetStructuredDatasetType().GetColumns() {
columns = append(columns, &core.StructuredDatasetType_DatasetColumn{
- Name: column.Name,
- LiteralType: StripTypeMetadata(column.LiteralType),
+ Name: column.GetName(),
+ LiteralType: StripTypeMetadata(column.GetLiteralType()),
})
}
@@ -125,17 +125,17 @@ func StripInterfaceTypeMetadata(iface *core.TypedInterface) *core.TypedInterface
newIface := *iface
- if iface.Inputs != nil {
- for name, i := range iface.Inputs.Variables {
- i.Type = StripTypeMetadata(i.Type)
+ if iface.GetInputs() != nil {
+ for name, i := range iface.GetInputs().GetVariables() {
+ i.Type = StripTypeMetadata(i.GetType())
i.Description = ""
newIface.Inputs.Variables[name] = i
}
}
- if iface.Outputs != nil {
- for name, i := range iface.Outputs.Variables {
- i.Type = StripTypeMetadata(i.Type)
+ if iface.GetOutputs() != nil {
+ for name, i := range iface.GetOutputs().GetVariables() {
+ i.Type = StripTypeMetadata(i.GetType())
i.Description = ""
iface.Outputs.Variables[name] = i
}
diff --git a/flytepropeller/pkg/compiler/transformers/k8s/utils_test.go b/flytepropeller/pkg/compiler/transformers/k8s/utils_test.go
index d2d9b10866..0a7e991399 100644
--- a/flytepropeller/pkg/compiler/transformers/k8s/utils_test.go
+++ b/flytepropeller/pkg/compiler/transformers/k8s/utils_test.go
@@ -14,9 +14,9 @@ func TestComputeRetryStrategy(t *testing.T) {
tests := []struct {
name string
- nodeRetries int
- taskRetries int
- expectedRetries int
+ nodeRetries uint32
+ taskRetries uint32
+ expectedRetries uint32
}{
{"node-only", 1, 0, 2},
{"task-only", 0, 1, 2},
@@ -31,7 +31,7 @@ func TestComputeRetryStrategy(t *testing.T) {
node = &core.Node{
Metadata: &core.NodeMetadata{
Retries: &core.RetryStrategy{
- Retries: uint32(test.nodeRetries),
+ Retries: test.nodeRetries,
},
},
}
@@ -42,7 +42,7 @@ func TestComputeRetryStrategy(t *testing.T) {
tmpl = &core.TaskTemplate{
Metadata: &core.TaskMetadata{
Retries: &core.RetryStrategy{
- Retries: uint32(test.taskRetries),
+ Retries: test.taskRetries,
},
},
}
@@ -51,7 +51,7 @@ func TestComputeRetryStrategy(t *testing.T) {
r := computeRetryStrategy(node, tmpl)
if test.expectedRetries != 0 {
assert.NotNil(t, r)
- assert.Equal(t, test.expectedRetries, *r.MinAttempts)
+ assert.Equal(t, int(test.expectedRetries), *r.MinAttempts) // #nosec G115
} else {
assert.Nil(t, r)
}
@@ -292,7 +292,7 @@ func TestStripInterfaceTypeMetadata(t *testing.T) {
}
stripped := StripInterfaceTypeMetadata(i)
- assert.Nil(t, stripped.Inputs.Variables["a"].Type.Metadata)
- assert.Nil(t, stripped.Outputs.Variables["a"].Type.Metadata)
+ assert.Nil(t, stripped.GetInputs().GetVariables()["a"].GetType().GetMetadata())
+ assert.Nil(t, stripped.GetOutputs().GetVariables()["a"].GetType().GetMetadata())
})
}
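
The inline "#nosec" comments added above are gosec suppressions: G306 covers os.WriteFile calls with broad permissions (os.ModePerm in the golden-file update path), and G115 covers integer conversions that could overflow, such as the uint32-to-int conversion in the retry assertion. Where the values are known to be bounded, the diff suppresses the finding rather than restructuring the test. A minimal, illustrative sketch of the G115 case (values here are hypothetical):

package main

import "fmt"

func main() {
	// uint32 -> int can overflow on 32-bit platforms, which gosec's G115
	// rule flags; a bounded value is suppressed inline.
	attempts := uint32(3)
	fmt.Println(int(attempts)) // #nosec G115 -- bounded test value
}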
diff --git a/flytepropeller/pkg/compiler/transformers/k8s/workflow.go b/flytepropeller/pkg/compiler/transformers/k8s/workflow.go
index 2421ddf9bb..eb9023bfa2 100644
--- a/flytepropeller/pkg/compiler/transformers/k8s/workflow.go
+++ b/flytepropeller/pkg/compiler/transformers/k8s/workflow.go
@@ -39,13 +39,13 @@ func requiresInputs(w *core.WorkflowTemplate) bool {
return false
}
- return len(w.GetInterface().GetInputs().Variables) > 0
+ return len(w.GetInterface().GetInputs().GetVariables()) > 0
}
// Note: Update WorkflowNameFromID for any change made to WorkflowIDAsString
func WorkflowIDAsString(id *core.Identifier) string {
b := strings.Builder{}
- _, err := b.WriteString(id.Project)
+ _, err := b.WriteString(id.GetProject())
if err != nil {
return ""
}
@@ -55,7 +55,7 @@ func WorkflowIDAsString(id *core.Identifier) string {
return ""
}
- _, err = b.WriteString(id.Domain)
+ _, err = b.WriteString(id.GetDomain())
if err != nil {
return ""
}
@@ -65,7 +65,7 @@ func WorkflowIDAsString(id *core.Identifier) string {
return ""
}
- _, err = b.WriteString(id.Name)
+ _, err = b.WriteString(id.GetName())
if err != nil {
return ""
}
@@ -83,10 +83,10 @@ func WorkflowNameFromID(id string) string {
func buildFlyteWorkflowSpec(wf *core.CompiledWorkflow, tasks []*core.CompiledTask, errs errors.CompileErrors) (
spec *v1alpha1.WorkflowSpec, err error) {
- wf.Template.Interface = StripInterfaceTypeMetadata(wf.Template.Interface)
+ wf.Template.Interface = StripInterfaceTypeMetadata(wf.GetTemplate().GetInterface())
var failureN *v1alpha1.NodeSpec
- if n := wf.Template.GetFailureNode(); n != nil {
+ if n := wf.GetTemplate().GetFailureNode(); n != nil {
nodes, ok := buildNodeSpec(n, tasks, errs.NewScope())
if !ok {
return nil, errs
@@ -94,34 +94,34 @@ func buildFlyteWorkflowSpec(wf *core.CompiledWorkflow, tasks []*core.CompiledTas
failureN = nodes[0]
}
- nodes, _ := buildNodes(wf.Template.GetNodes(), tasks, errs.NewScope())
+ nodes, _ := buildNodes(wf.GetTemplate().GetNodes(), tasks, errs.NewScope())
if errs.HasErrors() {
return nil, errs
}
- outputBindings := make([]*v1alpha1.Binding, 0, len(wf.Template.Outputs))
- for _, b := range wf.Template.Outputs {
+ outputBindings := make([]*v1alpha1.Binding, 0, len(wf.GetTemplate().GetOutputs()))
+ for _, b := range wf.GetTemplate().GetOutputs() {
outputBindings = append(outputBindings, &v1alpha1.Binding{
Binding: b,
})
}
var outputs *v1alpha1.OutputVarMap
- if wf.Template.GetInterface() != nil {
- outputs = &v1alpha1.OutputVarMap{VariableMap: wf.Template.GetInterface().Outputs}
+ if wf.GetTemplate().GetInterface() != nil {
+ outputs = &v1alpha1.OutputVarMap{VariableMap: wf.GetTemplate().GetInterface().GetOutputs()}
} else {
outputs = &v1alpha1.OutputVarMap{VariableMap: &core.VariableMap{}}
}
failurePolicy := v1alpha1.WorkflowOnFailurePolicy(core.WorkflowMetadata_FAIL_IMMEDIATELY)
- if wf.Template != nil && wf.Template.Metadata != nil {
- failurePolicy = v1alpha1.WorkflowOnFailurePolicy(wf.Template.Metadata.OnFailure)
+ if wf.GetTemplate() != nil && wf.GetTemplate().GetMetadata() != nil {
+ failurePolicy = v1alpha1.WorkflowOnFailurePolicy(wf.GetTemplate().GetMetadata().GetOnFailure())
}
connections := buildConnections(wf)
return &v1alpha1.WorkflowSpec{
- ID: WorkflowIDAsString(wf.Template.Id),
+ ID: WorkflowIDAsString(wf.GetTemplate().GetId()),
OnFailure: failureN,
Nodes: nodes,
Outputs: outputs,
@@ -147,13 +147,13 @@ func generateName(wfID *core.Identifier, execID *core.WorkflowExecutionIdentifie
name string, generateName string, label string, project string, domain string, err error) {
if execID != nil {
- return execID.Name, "", execID.Name, execID.Project, execID.Domain, nil
+ return execID.GetName(), "", execID.GetName(), execID.GetProject(), execID.GetDomain(), nil
} else if wfID != nil {
- wid := fmt.Sprintf("%v%v%v", withSeparatorIfNotEmpty(wfID.Project), withSeparatorIfNotEmpty(wfID.Domain), wfID.Name)
+ wid := fmt.Sprintf("%v%v%v", withSeparatorIfNotEmpty(wfID.GetProject()), withSeparatorIfNotEmpty(wfID.GetDomain()), wfID.GetName())
// TODO: this is a hack until we figure out how to restrict generated names. K8s has a limitation of 63 chars
wid = wid[:minInt(32, len(wid))]
- return "", fmt.Sprintf("%v-", wid), wid, wfID.Project, wfID.Domain, nil
+ return "", fmt.Sprintf("%v-", wid), wid, wfID.GetProject(), wfID.GetDomain(), nil
} else {
return "", "", "", "", "", fmt.Errorf("expected param not set. wfID or execID must be non-nil values")
}
@@ -169,8 +169,8 @@ func BuildFlyteWorkflow(wfClosure *core.CompiledWorkflowClosure, inputs *core.Li
return nil, errs
}
- wf := wfClosure.Primary.Template
- tasks := wfClosure.Tasks
+ wf := wfClosure.GetPrimary().GetTemplate()
+ tasks := wfClosure.GetTasks()
// Fill in inputs in the start node.
if inputs != nil {
if ok := validateInputs(common.StartNodeID, wf.GetInterface(), *inputs, errs.NewScope()); !ok {
@@ -182,22 +182,22 @@ func BuildFlyteWorkflow(wfClosure *core.CompiledWorkflowClosure, inputs *core.Li
}
for _, t := range tasks {
- t.Template.Interface = StripInterfaceTypeMetadata(t.Template.Interface)
+ t.Template.Interface = StripInterfaceTypeMetadata(t.GetTemplate().GetInterface())
}
- primarySpec, err := buildFlyteWorkflowSpec(wfClosure.Primary, tasks, errs.NewScope())
+ primarySpec, err := buildFlyteWorkflowSpec(wfClosure.GetPrimary(), tasks, errs.NewScope())
if err != nil {
errs.Collect(errors.NewWorkflowBuildError(err))
return nil, errs
}
- subwfs := make(map[v1alpha1.WorkflowID]*v1alpha1.WorkflowSpec, len(wfClosure.SubWorkflows))
- for _, subWf := range wfClosure.SubWorkflows {
+ subwfs := make(map[v1alpha1.WorkflowID]*v1alpha1.WorkflowSpec, len(wfClosure.GetSubWorkflows()))
+ for _, subWf := range wfClosure.GetSubWorkflows() {
spec, err := buildFlyteWorkflowSpec(subWf, tasks, errs.NewScope())
if err != nil {
errs.Collect(errors.NewWorkflowBuildError(err))
} else {
- subwfs[subWf.Template.Id.String()] = spec
+ subwfs[subWf.GetTemplate().GetId().String()] = spec
}
}
@@ -266,7 +266,7 @@ func BuildFlyteWorkflow(wfClosure *core.CompiledWorkflowClosure, inputs *core.Li
func toMapOfLists(connections map[string]*core.ConnectionSet_IdList) map[string][]string {
res := make(map[string][]string, len(connections))
for key, val := range connections {
- res[key] = val.Ids
+ res[key] = val.GetIds()
}
return res
@@ -292,24 +292,24 @@ func BuildWfClosureCrdFields(wfClosure *core.CompiledWorkflowClosure) (*WfClosur
return nil, errs
}
- primarySpec, err := buildFlyteWorkflowSpec(wfClosure.Primary, wfClosure.Tasks, errs.NewScope())
+ primarySpec, err := buildFlyteWorkflowSpec(wfClosure.GetPrimary(), wfClosure.GetTasks(), errs.NewScope())
if err != nil {
errs.Collect(errors.NewWorkflowBuildError(err))
return nil, errs
}
- for _, t := range wfClosure.Tasks {
- t.Template.Interface = StripInterfaceTypeMetadata(t.Template.Interface)
+ for _, t := range wfClosure.GetTasks() {
+ t.Template.Interface = StripInterfaceTypeMetadata(t.GetTemplate().GetInterface())
}
- tasks := buildTasks(wfClosure.Tasks, errs.NewScope())
+ tasks := buildTasks(wfClosure.GetTasks(), errs.NewScope())
- subwfs := make(map[v1alpha1.WorkflowID]*v1alpha1.WorkflowSpec, len(wfClosure.SubWorkflows))
- for _, subWf := range wfClosure.SubWorkflows {
- spec, err := buildFlyteWorkflowSpec(subWf, wfClosure.Tasks, errs.NewScope())
+ subwfs := make(map[v1alpha1.WorkflowID]*v1alpha1.WorkflowSpec, len(wfClosure.GetSubWorkflows()))
+ for _, subWf := range wfClosure.GetSubWorkflows() {
+ spec, err := buildFlyteWorkflowSpec(subWf, wfClosure.GetTasks(), errs.NewScope())
if err != nil {
errs.Collect(errors.NewWorkflowBuildError(err))
} else {
- subwfs[subWf.Template.Id.String()] = spec
+ subwfs[subWf.GetTemplate().GetId().String()] = spec
}
}
diff --git a/flytepropeller/pkg/compiler/transformers/k8s/workflow_test.go b/flytepropeller/pkg/compiler/transformers/k8s/workflow_test.go
index dbb51e25eb..378343ec20 100644
--- a/flytepropeller/pkg/compiler/transformers/k8s/workflow_test.go
+++ b/flytepropeller/pkg/compiler/transformers/k8s/workflow_test.go
@@ -331,10 +331,10 @@ func TestBuildFlyteWorkflow_withBranch(t *testing.T) {
w := &core.CompiledWorkflowClosure{}
assert.NoError(t, utils.UnmarshalBytesToPb(c, w))
- assert.Len(t, w.Primary.Connections.Downstream, 2)
- ids := w.Primary.Connections.Downstream["start-node"]
- assert.Len(t, ids.Ids, 1)
- assert.Equal(t, ids.Ids[0], "n0")
+ assert.Len(t, w.GetPrimary().GetConnections().GetDownstream(), 2)
+ ids := w.GetPrimary().GetConnections().GetDownstream()["start-node"]
+ assert.Len(t, ids.GetIds(), 1)
+ assert.Equal(t, ids.GetIds()[0], "n0")
wf, err := BuildFlyteWorkflow(
w,
diff --git a/flytepropeller/pkg/compiler/validators/bindings.go b/flytepropeller/pkg/compiler/validators/bindings.go
index b69dda529f..fd317036fa 100644
--- a/flytepropeller/pkg/compiler/validators/bindings.go
+++ b/flytepropeller/pkg/compiler/validators/bindings.go
@@ -109,7 +109,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding
return nil, nil, !errs.HasErrors()
}
- if upNode, found := validateNodeID(w, val.Promise.NodeId, errs.NewScope()); found {
+ if upNode, found := validateNodeID(w, val.Promise.GetNodeId(), errs.NewScope()); found {
v, err := typing.ParseVarName(val.Promise.GetVar())
if err != nil {
errs.Collect(errors.NewSyntaxError(nodeID, val.Promise.GetVar(), err))
@@ -117,28 +117,28 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding
}
inputVar := nodeParam
- outputVar := val.Promise.Var
+ outputVar := val.Promise.GetVar()
if node.GetMetadata() != nil {
- inputVar = fmt.Sprintf("%s.%s", node.GetMetadata().Name, nodeParam)
+ inputVar = fmt.Sprintf("%s.%s", node.GetMetadata().GetName(), nodeParam)
}
if upNode.GetMetadata() != nil {
- outputVar = fmt.Sprintf("%s.%s", upNode.GetMetadata().Name, val.Promise.Var)
+ outputVar = fmt.Sprintf("%s.%s", upNode.GetMetadata().GetName(), val.Promise.GetVar())
}
if param, paramFound := validateOutputVar(upNode, v.Name, errs.NewScope()); paramFound {
- sourceType := param.Type
+ sourceType := param.GetType()
// If the variable has an index. We expect param to be a collection.
if v.Index != nil {
if cType := param.GetType().GetCollectionType(); cType == nil {
- errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, c.LiteralTypeToStr(param.Type), inputVar, c.LiteralTypeToStr(expectedType)))
+ errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, c.LiteralTypeToStr(param.GetType()), inputVar, c.LiteralTypeToStr(expectedType)))
} else {
sourceType = cType
}
}
// If the variable has an attribute path. Extract the type of the last attribute.
- for _, attr := range val.Promise.AttrPath {
+ for _, attr := range val.Promise.GetAttrPath() {
var tmpType *flyte.LiteralType
var exist bool
@@ -152,7 +152,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding
if !exist {
// the error should output the sourceType instead of tmpType because tmpType is nil
- errs.Collect(errors.NewFieldNotFoundErr(nodeID, val.Promise.Var, sourceType.String(), attr.GetStringValue()))
+ errs.Collect(errors.NewFieldNotFoundErr(nodeID, val.Promise.GetVar(), sourceType.String(), attr.GetStringValue()))
return nil, nil, !errs.HasErrors()
}
sourceType = tmpType
@@ -161,7 +161,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding
if !validateParamTypes || AreTypesCastable(sourceType, expectedType) {
val.Promise.NodeId = upNode.GetId()
- return param.GetType(), []c.NodeID{val.Promise.NodeId}, true
+ return param.GetType(), []c.NodeID{val.Promise.GetNodeId()}, true
}
errs.Collect(errors.NewMismatchingVariablesErr(node.GetId(), outputVar, c.LiteralTypeToStr(sourceType), inputVar, c.LiteralTypeToStr(expectedType)))
@@ -187,14 +187,14 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding
v := val.Scalar.GetPrimitive().GetStringValue()
// Let us assert that the bound value is a correct enum Value
found := false
- for _, ev := range expectedType.GetEnumType().Values {
+ for _, ev := range expectedType.GetEnumType().GetValues() {
if ev == v {
found = true
break
}
}
if !found {
- errs.Collect(errors.NewIllegalEnumValueError(nodeID, nodeParam, v, expectedType.GetEnumType().Values))
+ errs.Collect(errors.NewIllegalEnumValueError(nodeID, nodeParam, v, expectedType.GetEnumType().GetValues()))
}
}
@@ -237,7 +237,7 @@ func ValidateBindings(w c.WorkflowBuilder, node c.Node, bindings []*flyte.Bindin
providedBindings.Insert(binding.GetVar())
if resolvedType, upstreamNodes, bindingOk := validateBinding(w, node, binding.GetVar(), binding.GetBinding(),
- param.Type, errs.NewScope(), validateParamTypes); bindingOk {
+ param.GetType(), errs.NewScope(), validateParamTypes); bindingOk {
for _, upNode := range upstreamNodes {
// Add implicit Edges
switch edgeDirection {
@@ -259,7 +259,7 @@ func ValidateBindings(w c.WorkflowBuilder, node c.Node, bindings []*flyte.Bindin
// If we missed binding some params, add errors
if params != nil {
- for paramName, Variable := range params.Variables {
+ for paramName, Variable := range params.GetVariables() {
if !providedBindings.Has(paramName) && !IsOptionalType(*Variable) {
errs.Collect(errors.NewParameterNotBoundErr(node.GetId(), paramName))
}
@@ -271,10 +271,10 @@ func ValidateBindings(w c.WorkflowBuilder, node c.Node, bindings []*flyte.Bindin
// IsOptionalType Return true if there is a None type in Union Type
func IsOptionalType(variable flyte.Variable) bool {
- if variable.Type.GetUnionType() == nil {
+ if variable.GetType().GetUnionType() == nil {
return false
}
- for _, variant := range variable.Type.GetUnionType().Variants {
+ for _, variant := range variable.GetType().GetUnionType().GetVariants() {
if flyte.SimpleType_NONE == variant.GetSimple() {
return true
}
diff --git a/flytepropeller/pkg/compiler/validators/bindings_test.go b/flytepropeller/pkg/compiler/validators/bindings_test.go
index bcb498eebd..e817ba5d5c 100644
--- a/flytepropeller/pkg/compiler/validators/bindings_test.go
+++ b/flytepropeller/pkg/compiler/validators/bindings_test.go
@@ -24,8 +24,8 @@ func LiteralToBinding(l *core.Literal) *core.BindingData {
},
}
case *core.Literal_Collection:
- x := make([]*core.BindingData, 0, len(l.GetCollection().Literals))
- for _, sub := range l.GetCollection().Literals {
+ x := make([]*core.BindingData, 0, len(l.GetCollection().GetLiterals()))
+ for _, sub := range l.GetCollection().GetLiterals() {
x = append(x, LiteralToBinding(sub))
}
@@ -37,8 +37,8 @@ func LiteralToBinding(l *core.Literal) *core.BindingData {
},
}
case *core.Literal_Map:
- x := make(map[string]*core.BindingData, len(l.GetMap().Literals))
- for key, val := range l.GetMap().Literals {
+ x := make(map[string]*core.BindingData, len(l.GetMap().GetLiterals()))
+ for key, val := range l.GetMap().GetLiterals() {
x[key] = LiteralToBinding(val)
}
@@ -63,7 +63,7 @@ func TestValidateBindings(t *testing.T) {
compileErrors := compilerErrors.NewCompileErrors()
resolved, ok := ValidateBindings(wf, n, bindings, vars, true, c.EdgeDirectionBidirectional, compileErrors)
assert.True(t, ok)
- assert.Empty(t, resolved.Variables)
+ assert.Empty(t, resolved.GetVariables())
})
t.Run("Variable not in inputs", func(t *testing.T) {
diff --git a/flytepropeller/pkg/compiler/validators/branch.go b/flytepropeller/pkg/compiler/validators/branch.go
index 386f1cecda..94e4bea7ad 100644
--- a/flytepropeller/pkg/compiler/validators/branch.go
+++ b/flytepropeller/pkg/compiler/validators/branch.go
@@ -18,17 +18,17 @@ func validateBranchInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs error
return nil, false
}
- if ifBlock := node.GetBranchNode().IfElse; ifBlock == nil {
+ if ifBlock := node.GetBranchNode().GetIfElse(); ifBlock == nil {
errs.Collect(errors.NewValueRequiredErr(node.GetId(), "Branch.IfElse"))
return nil, false
}
- if ifCase := node.GetBranchNode().IfElse.Case; ifCase == nil {
+ if ifCase := node.GetBranchNode().GetIfElse().GetCase(); ifCase == nil {
errs.Collect(errors.NewValueRequiredErr(node.GetId(), "Branch.IfElse.Case"))
return nil, false
}
- if thenNode := node.GetBranchNode().IfElse.Case.ThenNode; thenNode == nil {
+ if thenNode := node.GetBranchNode().GetIfElse().GetCase().GetThenNode(); thenNode == nil {
errs.Collect(errors.NewValueRequiredErr(node.GetId(), "Branch.IfElse.Case.ThenNode"))
return nil, false
}
@@ -37,33 +37,33 @@ func validateBranchInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs error
finalOutputParameterNames := sets.NewString()
validateIfaceMatch := func(nodeId string, iface2 *flyte.TypedInterface, errsScope errors.CompileErrors) (match bool) {
- outputs2, outputs2Set := buildVariablesIndex(iface2.Outputs)
+ outputs2, outputs2Set := buildVariablesIndex(iface2.GetOutputs())
// Validate that parameters that exist in both interfaces have compatible types.
finalOutputParameterNames = finalOutputParameterNames.Intersection(outputs2Set)
for paramName := range finalOutputParameterNames {
- if validateVarType(nodeId, paramName, outputs[paramName], outputs2[paramName].Type, errs.NewScope()) {
- validateVarType(nodeId, paramName, outputs2[paramName], outputs[paramName].Type, errs.NewScope())
+ if validateVarType(nodeId, paramName, outputs[paramName], outputs2[paramName].GetType(), errs.NewScope()) {
+ validateVarType(nodeId, paramName, outputs2[paramName], outputs[paramName].GetType(), errs.NewScope())
}
}
return !errsScope.HasErrors()
}
- cases := make([]*flyte.Node, 0, len(node.GetBranchNode().IfElse.Other)+1)
- caseBlock := node.GetBranchNode().IfElse.Case
- cases = append(cases, caseBlock.ThenNode)
+ cases := make([]*flyte.Node, 0, len(node.GetBranchNode().GetIfElse().GetOther())+1)
+ caseBlock := node.GetBranchNode().GetIfElse().GetCase()
+ cases = append(cases, caseBlock.GetThenNode())
- otherCases := node.GetBranchNode().IfElse.Other
+ otherCases := node.GetBranchNode().GetIfElse().GetOther()
for _, otherCase := range otherCases {
- if otherCase.ThenNode == nil {
+ if otherCase.GetThenNode() == nil {
errs.Collect(errors.NewValueRequiredErr(node.GetId(), "IfElse.Case.ThenNode"))
continue
}
- cases = append(cases, otherCase.ThenNode)
+ cases = append(cases, otherCase.GetThenNode())
}
- if elseNode := node.GetBranchNode().IfElse.GetElseNode(); elseNode != nil {
+ if elseNode := node.GetBranchNode().GetIfElse().GetElseNode(); elseNode != nil {
cases = append(cases, elseNode)
}
@@ -79,12 +79,12 @@ func validateBranchInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs error
// match. We will pull the inputs needed for the underlying branch node at runtime.
iface2 = &flyte.TypedInterface{
Inputs: &flyte.VariableMap{Variables: map[string]*flyte.Variable{}},
- Outputs: iface2.Outputs,
+ Outputs: iface2.GetOutputs(),
}
if iface == nil {
iface = iface2
- outputs, finalOutputParameterNames = buildVariablesIndex(iface.Outputs)
+ outputs, finalOutputParameterNames = buildVariablesIndex(iface.GetOutputs())
} else {
validateIfaceMatch(n.GetId(), iface2, errs.NewScope())
}
@@ -99,7 +99,7 @@ func validateBranchInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs error
if !errs.HasErrors() && iface != nil {
iface = &flyte.TypedInterface{
Inputs: inputVarsFromBindings,
- Outputs: filterVariables(iface.Outputs, finalOutputParameterNames),
+ Outputs: filterVariables(iface.GetOutputs(), finalOutputParameterNames),
}
} else {
iface = nil
diff --git a/flytepropeller/pkg/compiler/validators/condition.go b/flytepropeller/pkg/compiler/validators/condition.go
index 70b72cde8a..c402040135 100644
--- a/flytepropeller/pkg/compiler/validators/condition.go
+++ b/flytepropeller/pkg/compiler/validators/condition.go
@@ -48,8 +48,8 @@ func ValidateBooleanExpression(w c.WorkflowBuilder, node c.NodeBuilder, expr *fl
}
}
} else if expr.GetConjunction() != nil {
- ValidateBooleanExpression(w, node, expr.GetConjunction().LeftExpression, requireParamType, errs.NewScope())
- ValidateBooleanExpression(w, node, expr.GetConjunction().RightExpression, requireParamType, errs.NewScope())
+ ValidateBooleanExpression(w, node, expr.GetConjunction().GetLeftExpression(), requireParamType, errs.NewScope())
+ ValidateBooleanExpression(w, node, expr.GetConjunction().GetRightExpression(), requireParamType, errs.NewScope())
} else {
errs.Collect(errors.NewValueRequiredErr(node.GetId(), "Expr"))
}
diff --git a/flytepropeller/pkg/compiler/validators/interface.go b/flytepropeller/pkg/compiler/validators/interface.go
index a71c52e49a..fe22a9fb48 100644
--- a/flytepropeller/pkg/compiler/validators/interface.go
+++ b/flytepropeller/pkg/compiler/validators/interface.go
@@ -17,14 +17,14 @@ func ValidateInterface(nodeID c.NodeID, iface *core.TypedInterface, errs errors.
}
// validate InputsRef/OutputsRef parameters required attributes are set
- if iface.Inputs != nil && iface.Inputs.Variables != nil {
- validateVariables(nodeID, iface.Inputs, errs.NewScope())
+ if iface.GetInputs() != nil && iface.Inputs.Variables != nil {
+ validateVariables(nodeID, iface.GetInputs(), errs.NewScope())
} else {
iface.Inputs = &core.VariableMap{Variables: map[string]*core.Variable{}}
}
- if iface.Outputs != nil && iface.Outputs.Variables != nil {
- validateVariables(nodeID, iface.Outputs, errs.NewScope())
+ if iface.GetOutputs() != nil && iface.Outputs.Variables != nil {
+ validateVariables(nodeID, iface.GetOutputs(), errs.NewScope())
} else {
iface.Outputs = &core.VariableMap{Variables: map[string]*core.Variable{}}
}
@@ -55,8 +55,8 @@ func ValidateUnderlyingInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs e
errs.Collect(errors.NewTaskReferenceNotFoundErr(node.GetId(), node.GetTaskNode().GetReferenceId().String()))
}
case *core.Node_WorkflowNode:
- if node.GetWorkflowNode().GetLaunchplanRef().String() == w.GetCoreWorkflow().Template.Id.String() {
- iface = w.GetCoreWorkflow().Template.Interface
+ if node.GetWorkflowNode().GetLaunchplanRef().String() == w.GetCoreWorkflow().GetTemplate().GetId().String() {
+ iface = w.GetCoreWorkflow().GetTemplate().GetInterface()
if iface == nil {
errs.Collect(errors.NewValueRequiredErr(node.GetId(), "WorkflowNode.Interface"))
}
@@ -75,11 +75,11 @@ func ValidateUnderlyingInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs e
// Compute exposed inputs as the union of all required inputs and any input overwritten by the node.
exposedInputs := map[string]*core.Variable{}
if inputs != nil && inputs.Parameters != nil {
- for name, p := range inputs.Parameters {
+ for name, p := range inputs.GetParameters() {
if p.GetRequired() {
- exposedInputs[name] = p.Var
+ exposedInputs[name] = p.GetVar()
} else if containsBindingByVariableName(node.GetInputs(), name) {
- exposedInputs[name] = p.Var
+ exposedInputs[name] = p.GetVar()
}
// else, the param has a default value and is not being overwritten by the node
}
@@ -98,10 +98,10 @@ func ValidateUnderlyingInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs e
}
} else if node.GetWorkflowNode().GetSubWorkflowRef() != nil {
if wf, wfOk := w.GetSubWorkflow(node.GetWorkflowNode().GetSubWorkflowRef()); wfOk {
- if wf.Template == nil {
+ if wf.GetTemplate() == nil {
errs.Collect(errors.NewValueRequiredErr(node.GetId(), "WorkflowNode.Template"))
} else {
- iface = wf.Template.Interface
+ iface = wf.GetTemplate().GetInterface()
if iface == nil {
errs.Collect(errors.NewValueRequiredErr(node.GetId(), "WorkflowNode.Template.Interface"))
}
@@ -155,7 +155,7 @@ func ValidateUnderlyingInterface(w c.WorkflowBuilder, node c.NodeBuilder, errs e
}
case *core.Node_ArrayNode:
arrayNode := node.GetArrayNode()
- underlyingNodeBuilder := w.GetOrCreateNodeBuilder(arrayNode.Node)
+ underlyingNodeBuilder := w.GetOrCreateNodeBuilder(arrayNode.GetNode())
if underlyingIface, ok := ValidateUnderlyingInterface(w, underlyingNodeBuilder, errs.NewScope()); ok {
// ArrayNode interface should be inferred from the underlying node interface. flytekit
// will correct wrap variables in collections as needed, leaving partials as is.
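The hunks above and below mechanically replace direct field access on protobuf messages with the generated `GetX()` accessors. A minimal, self-contained sketch of why that matters: generated getters are nil-receiver safe, so chained access over optional sub-messages returns zero values instead of panicking. The types here are illustrative stand-ins, not the flyteidl-generated ones.

```go
package main

import "fmt"

// Hand-written stand-ins that mimic the shape of protoc-generated getters.
type VariableMap struct{ Variables map[string]string }

func (m *VariableMap) GetVariables() map[string]string {
	if m == nil {
		return nil
	}
	return m.Variables
}

type TypedInterface struct{ Inputs *VariableMap }

func (t *TypedInterface) GetInputs() *VariableMap {
	if t == nil {
		return nil
	}
	return t.Inputs
}

func main() {
	var iface *TypedInterface // a nil message, as a validator may receive

	// iface.Inputs.Variables would panic with a nil pointer dereference;
	// the chained getters degrade to the zero value instead.
	fmt.Println(len(iface.GetInputs().GetVariables())) // prints 0
}
```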
diff --git a/flytepropeller/pkg/compiler/validators/interface_test.go b/flytepropeller/pkg/compiler/validators/interface_test.go
index 85c031c0a7..ba987bda62 100644
--- a/flytepropeller/pkg/compiler/validators/interface_test.go
+++ b/flytepropeller/pkg/compiler/validators/interface_test.go
@@ -66,10 +66,10 @@ func assertNonEmptyInterface(t testing.TB, iface *core.TypedInterface, ifaceOk b
t.Fatal(errs)
}
- assert.NotNil(t, iface.Inputs)
- assert.NotNil(t, iface.Inputs.Variables)
- assert.NotNil(t, iface.Outputs)
- assert.NotNil(t, iface.Outputs.Variables)
+ assert.NotNil(t, iface.GetInputs())
+ assert.NotNil(t, iface.GetInputs().GetVariables())
+ assert.NotNil(t, iface.GetOutputs())
+ assert.NotNil(t, iface.GetOutputs().GetVariables())
}
func TestValidateUnderlyingInterface(t *testing.T) {
@@ -419,8 +419,8 @@ func TestValidateUnderlyingInterface(t *testing.T) {
taskNodeBuilder := &mocks.NodeBuilder{}
taskNodeBuilder.On("GetCoreNode").Return(taskNode)
- taskNodeBuilder.On("GetId").Return(taskNode.Id)
- taskNodeBuilder.On("GetTaskNode").Return(taskNode.Target.(*core.Node_TaskNode).TaskNode)
+ taskNodeBuilder.On("GetId").Return(taskNode.GetId())
+ taskNodeBuilder.On("GetTaskNode").Return(taskNode.GetTarget().(*core.Node_TaskNode).TaskNode)
taskNodeBuilder.On("GetInterface").Return(nil)
taskNodeBuilder.On("SetInterface", mock.AnythingOfType("*core.TypedInterface")).Return(nil)
@@ -431,7 +431,7 @@ func TestValidateUnderlyingInterface(t *testing.T) {
}).String()
})).Return(&task, true)
wfBuilder.On("GetOrCreateNodeBuilder", mock.MatchedBy(func(node *core.Node) bool {
- return node.Id == "node_1"
+ return node.GetId() == "node_1"
})).Return(taskNodeBuilder)
// mock array node
@@ -445,9 +445,9 @@ func TestValidateUnderlyingInterface(t *testing.T) {
}
nodeBuilder := mocks.NodeBuilder{}
- nodeBuilder.On("GetArrayNode").Return(arrayNode.Target.(*core.Node_ArrayNode).ArrayNode)
+ nodeBuilder.On("GetArrayNode").Return(arrayNode.GetTarget().(*core.Node_ArrayNode).ArrayNode)
nodeBuilder.On("GetCoreNode").Return(arrayNode)
- nodeBuilder.On("GetId").Return(arrayNode.Id)
+ nodeBuilder.On("GetId").Return(arrayNode.GetId())
nodeBuilder.On("GetInterface").Return(nil)
nodeBuilder.On("SetInterface", mock.Anything).Return()
diff --git a/flytepropeller/pkg/compiler/validators/node.go b/flytepropeller/pkg/compiler/validators/node.go
index ad43abdce3..1b8b97ae12 100644
--- a/flytepropeller/pkg/compiler/validators/node.go
+++ b/flytepropeller/pkg/compiler/validators/node.go
@@ -15,19 +15,19 @@ func validateEffectiveOutputParameters(n c.NodeBuilder, errs errors.CompileError
params *flyte.VariableMap, ok bool) {
aliases := make(map[string]string, len(n.GetOutputAliases()))
for _, alias := range n.GetOutputAliases() {
- if _, found := aliases[alias.Var]; found {
- errs.Collect(errors.NewDuplicateAliasErr(n.GetId(), alias.Alias))
+ if _, found := aliases[alias.GetVar()]; found {
+ errs.Collect(errors.NewDuplicateAliasErr(n.GetId(), alias.GetAlias()))
} else {
- aliases[alias.Var] = alias.Alias
+ aliases[alias.GetVar()] = alias.GetAlias()
}
}
if n.GetInterface() != nil {
params = &flyte.VariableMap{
- Variables: make(map[string]*flyte.Variable, len(n.GetInterface().GetOutputs().Variables)),
+ Variables: make(map[string]*flyte.Variable, len(n.GetInterface().GetOutputs().GetVariables())),
}
- for paramName, param := range n.GetInterface().GetOutputs().Variables {
+ for paramName, param := range n.GetInterface().GetOutputs().GetVariables() {
if alias, found := aliases[paramName]; found {
if newParam, paramOk := withVariableName(param); paramOk {
params.Variables[alias] = newParam
@@ -57,19 +57,19 @@ func branchNodeIDFormatter(parentNodeID, thenNodeID string) string {
func ValidateBranchNode(w c.WorkflowBuilder, n c.NodeBuilder, requireParamType bool, errs errors.CompileErrors) (
discoveredNodes []c.NodeBuilder, ok bool) {
- cases := make([]*flyte.IfBlock, 0, len(n.GetBranchNode().IfElse.Other)+1)
- if n.GetBranchNode().IfElse.Case == nil {
+ cases := make([]*flyte.IfBlock, 0, len(n.GetBranchNode().GetIfElse().GetOther())+1)
+ if n.GetBranchNode().GetIfElse().GetCase() == nil {
errs.Collect(errors.NewBranchNodeHasNoCondition(n.GetId()))
} else {
- cases = append(cases, n.GetBranchNode().IfElse.Case)
+ cases = append(cases, n.GetBranchNode().GetIfElse().GetCase())
}
- cases = append(cases, n.GetBranchNode().IfElse.Other...)
+ cases = append(cases, n.GetBranchNode().GetIfElse().GetOther()...)
discoveredNodes = make([]c.NodeBuilder, 0, len(cases))
subNodes := make([]c.NodeBuilder, 0, len(cases)+1)
for _, block := range cases {
// Validate condition
- ValidateBooleanExpression(w, n, block.Condition, requireParamType, errs.NewScope())
+ ValidateBooleanExpression(w, n, block.GetCondition(), requireParamType, errs.NewScope())
if block.GetThenNode() == nil {
errs.Collect(errors.NewBranchNodeNotSpecified(n.GetId()))
@@ -79,10 +79,10 @@ func ValidateBranchNode(w c.WorkflowBuilder, n c.NodeBuilder, requireParamType b
}
}
- if elseNode := n.GetBranchNode().IfElse.GetElseNode(); elseNode != nil {
+ if elseNode := n.GetBranchNode().GetIfElse().GetElseNode(); elseNode != nil {
wrapperNode := w.GetOrCreateNodeBuilder(elseNode)
subNodes = append(subNodes, wrapperNode)
- } else if defaultElse := n.GetBranchNode().IfElse.GetDefault(); defaultElse == nil {
+ } else if defaultElse := n.GetBranchNode().GetIfElse().GetDefault(); defaultElse == nil {
errs.Collect(errors.NewBranchNodeHasNoDefault(n.GetId()))
}
@@ -126,7 +126,7 @@ func ValidateNode(w c.WorkflowBuilder, n c.NodeBuilder, validateConditionTypes b
}
// Order upstream node ids to ensure consistent output of the compiler even if client ordering changes.
- sort.Strings(n.GetCoreNode().UpstreamNodeIds)
+ sort.Strings(n.GetCoreNode().GetUpstreamNodeIds())
// Validate branch node conditions and inner nodes.
if n.GetBranchNode() != nil {
diff --git a/flytepropeller/pkg/compiler/validators/node_test.go b/flytepropeller/pkg/compiler/validators/node_test.go
index 3982b71344..642f568593 100644
--- a/flytepropeller/pkg/compiler/validators/node_test.go
+++ b/flytepropeller/pkg/compiler/validators/node_test.go
@@ -64,7 +64,7 @@ func TestValidateNode(t *testing.T) {
coreN.UpstreamNodeIds = []string{"n1", "n0"}
n.OnGetCoreNode().Return(coreN)
n.On("GetUpstreamNodeIds").Return(func() []string {
- return coreN.UpstreamNodeIds
+ return coreN.GetUpstreamNodeIds()
})
wf := &mocks.WorkflowBuilder{}
diff --git a/flytepropeller/pkg/compiler/validators/typing.go b/flytepropeller/pkg/compiler/validators/typing.go
index 2bde60b47b..66847410f7 100644
--- a/flytepropeller/pkg/compiler/validators/typing.go
+++ b/flytepropeller/pkg/compiler/validators/typing.go
@@ -136,7 +136,7 @@ func (t schemaTypeChecker) CastsFrom(upstreamType *flyte.LiteralType) bool {
}
// Flyte Schema can only be serialized to parquet
- if len(structuredDatasetType.Format) != 0 && !strings.EqualFold(structuredDatasetType.Format, "parquet") {
+ if len(structuredDatasetType.GetFormat()) != 0 && !strings.EqualFold(structuredDatasetType.GetFormat(), "parquet") {
return false
}
@@ -168,7 +168,7 @@ func (t structuredDatasetChecker) CastsFrom(upstreamType *flyte.LiteralType) boo
}
if schemaType != nil {
// Flyte Schema can only be serialized to parquet
- format := t.literalType.GetStructuredDatasetType().Format
+ format := t.literalType.GetStructuredDatasetType().GetFormat()
if len(format) != 0 && !strings.EqualFold(format, "parquet") {
return false
}
@@ -179,22 +179,22 @@ func (t structuredDatasetChecker) CastsFrom(upstreamType *flyte.LiteralType) boo
// Upstream (schema) -> downstream (schema)
func schemaCastFromSchema(upstream *flyte.SchemaType, downstream *flyte.SchemaType) bool {
- if len(upstream.Columns) == 0 || len(downstream.Columns) == 0 {
+ if len(upstream.GetColumns()) == 0 || len(downstream.GetColumns()) == 0 {
return true
}
nameToTypeMap := make(map[string]flyte.SchemaType_SchemaColumn_SchemaColumnType)
- for _, column := range upstream.Columns {
- nameToTypeMap[column.Name] = column.Type
+ for _, column := range upstream.GetColumns() {
+ nameToTypeMap[column.GetName()] = column.GetType()
}
// Check that the downstream schema is a strict sub-set of the upstream schema.
- for _, column := range downstream.Columns {
- upstreamType, ok := nameToTypeMap[column.Name]
+ for _, column := range downstream.GetColumns() {
+ upstreamType, ok := nameToTypeMap[column.GetName()]
if !ok {
return false
}
- if upstreamType != column.Type {
+ if upstreamType != column.GetType() {
return false
}
}
@@ -244,26 +244,26 @@ func (t unionTypeChecker) CastsFrom(upstreamType *flyte.LiteralType) bool {
// Upstream (structuredDatasetType) -> downstream (structuredDatasetType)
func structuredDatasetCastFromStructuredDataset(upstream *flyte.StructuredDatasetType, downstream *flyte.StructuredDatasetType) bool {
// Skip the format check here when format is empty. https://github.com/flyteorg/flyte/issues/2864
- if len(upstream.Format) != 0 && len(downstream.Format) != 0 && !strings.EqualFold(upstream.Format, downstream.Format) {
+ if len(upstream.GetFormat()) != 0 && len(downstream.GetFormat()) != 0 && !strings.EqualFold(upstream.GetFormat(), downstream.GetFormat()) {
return false
}
- if len(upstream.Columns) == 0 || len(downstream.Columns) == 0 {
+ if len(upstream.GetColumns()) == 0 || len(downstream.GetColumns()) == 0 {
return true
}
nameToTypeMap := make(map[string]*flyte.LiteralType)
- for _, column := range upstream.Columns {
- nameToTypeMap[column.Name] = column.LiteralType
+ for _, column := range upstream.GetColumns() {
+ nameToTypeMap[column.GetName()] = column.GetLiteralType()
}
// Check that the downstream structured dataset is a strict sub-set of the upstream structured dataset.
- for _, column := range downstream.Columns {
- upstreamType, ok := nameToTypeMap[column.Name]
+ for _, column := range downstream.GetColumns() {
+ upstreamType, ok := nameToTypeMap[column.GetName()]
if !ok {
return false
}
- if !getTypeChecker(column.LiteralType).CastsFrom(upstreamType) {
+ if !getTypeChecker(column.GetLiteralType()).CastsFrom(upstreamType) {
return false
}
}
@@ -272,21 +272,21 @@ func structuredDatasetCastFromStructuredDataset(upstream *flyte.StructuredDatase
// Upstream (schemaType) -> downstream (structuredDatasetType)
func structuredDatasetCastFromSchema(upstream *flyte.SchemaType, downstream *flyte.StructuredDatasetType) bool {
- if len(upstream.Columns) == 0 || len(downstream.Columns) == 0 {
+ if len(upstream.GetColumns()) == 0 || len(downstream.GetColumns()) == 0 {
return true
}
nameToTypeMap := make(map[string]flyte.SchemaType_SchemaColumn_SchemaColumnType)
- for _, column := range upstream.Columns {
- nameToTypeMap[column.Name] = column.GetType()
+ for _, column := range upstream.GetColumns() {
+ nameToTypeMap[column.GetName()] = column.GetType()
}
// Check that the downstream structuredDataset is a strict sub-set of the upstream schema.
- for _, column := range downstream.Columns {
- upstreamType, ok := nameToTypeMap[column.Name]
+ for _, column := range downstream.GetColumns() {
+ upstreamType, ok := nameToTypeMap[column.GetName()]
if !ok {
return false
}
- if !schemaTypeIsMatchStructuredDatasetType(upstreamType, column.LiteralType.GetSimple()) {
+ if !schemaTypeIsMatchStructuredDatasetType(upstreamType, column.GetLiteralType().GetSimple()) {
return false
}
}
@@ -295,17 +295,17 @@ func structuredDatasetCastFromSchema(upstream *flyte.SchemaType, downstream *fly
// Upstream (structuredDatasetType) -> downstream (schemaType)
func schemaCastFromStructuredDataset(upstream *flyte.StructuredDatasetType, downstream *flyte.SchemaType) bool {
- if len(upstream.Columns) == 0 || len(downstream.Columns) == 0 {
+ if len(upstream.GetColumns()) == 0 || len(downstream.GetColumns()) == 0 {
return true
}
nameToTypeMap := make(map[string]flyte.SimpleType)
- for _, column := range upstream.Columns {
- nameToTypeMap[column.Name] = column.LiteralType.GetSimple()
+ for _, column := range upstream.GetColumns() {
+ nameToTypeMap[column.GetName()] = column.GetLiteralType().GetSimple()
}
// Check that the downstream schema is a strict sub-set of the upstream structuredDataset.
- for _, column := range downstream.Columns {
- upstreamType, ok := nameToTypeMap[column.Name]
+ for _, column := range downstream.GetColumns() {
+ upstreamType, ok := nameToTypeMap[column.GetName()]
if !ok {
return false
}
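The schema and structured-dataset cast helpers above all share one rule: casting succeeds when either side declares no columns, or when every downstream column exists upstream with a matching type. A small sketch of that subset check, using plain strings in place of the flyteidl column and type messages:

```go
package main

import "fmt"

// columnsCastFrom reports whether every downstream column exists upstream
// with an identical type -- the "strict sub-set" rule used by the helpers.
func columnsCastFrom(upstream, downstream map[string]string) bool {
	if len(upstream) == 0 || len(downstream) == 0 {
		return true // an untyped side is accepted, mirroring the helpers above
	}
	for name, dsType := range downstream {
		upType, ok := upstream[name]
		if !ok || upType != dsType {
			return false
		}
	}
	return true
}

func main() {
	up := map[string]string{"a": "int", "b": "str"}
	fmt.Println(columnsCastFrom(up, map[string]string{"a": "int"})) // true
	fmt.Println(columnsCastFrom(up, map[string]string{"c": "int"})) // false
}
```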
diff --git a/flytepropeller/pkg/compiler/validators/utils.go b/flytepropeller/pkg/compiler/validators/utils.go
index d3e533e00d..c053a8b399 100644
--- a/flytepropeller/pkg/compiler/validators/utils.go
+++ b/flytepropeller/pkg/compiler/validators/utils.go
@@ -14,7 +14,7 @@ import (
func containsBindingByVariableName(bindings []*core.Binding, name string) (found bool) {
for _, b := range bindings {
- if b.Var == name {
+ if b.GetVar() == name {
return true
}
}
@@ -27,7 +27,7 @@ func findVariableByName(vars *core.VariableMap, name string) (variable *core.Var
return nil, false
}
- variable, found = vars.Variables[name]
+ variable, found = vars.GetVariables()[name]
return
}
@@ -48,7 +48,7 @@ func literalTypeForScalar(scalar *core.Scalar) *core.LiteralType {
// If the binary has a tag, treat it as a structured type (e.g., dict, dataclass, Pydantic BaseModel).
// Otherwise, treat it as raw binary data.
// Reference: https://github.com/flyteorg/flyte/blob/master/rfc/system/5741-binary-idl-with-message-pack.md
- if v.Binary.Tag == coreutils.MESSAGEPACK {
+ if v.Binary.GetTag() == coreutils.MESSAGEPACK {
literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}}
} else {
literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_BINARY}}
@@ -56,11 +56,11 @@ func literalTypeForScalar(scalar *core.Scalar) *core.LiteralType {
case *core.Scalar_Schema:
literalType = &core.LiteralType{
Type: &core.LiteralType_Schema{
- Schema: scalar.GetSchema().Type,
+ Schema: scalar.GetSchema().GetType(),
},
}
case *core.Scalar_StructuredDataset:
- if v.StructuredDataset == nil || v.StructuredDataset.Metadata == nil {
+ if v.StructuredDataset == nil || v.StructuredDataset.GetMetadata() == nil {
return &core.LiteralType{
Type: &core.LiteralType_StructuredDatasetType{},
}
@@ -68,7 +68,7 @@ func literalTypeForScalar(scalar *core.Scalar) *core.LiteralType {
literalType = &core.LiteralType{
Type: &core.LiteralType_StructuredDatasetType{
- StructuredDatasetType: scalar.GetStructuredDataset().GetMetadata().StructuredDatasetType,
+ StructuredDatasetType: scalar.GetStructuredDataset().GetMetadata().GetStructuredDatasetType(),
},
}
case *core.Scalar_NoneType:
@@ -115,9 +115,9 @@ func literalTypeForPrimitive(primitive *core.Primitive) *core.LiteralType {
}
func buildVariablesIndex(params *core.VariableMap) (map[string]*core.Variable, sets.String) {
- paramMap := make(map[string]*core.Variable, len(params.Variables))
+ paramMap := make(map[string]*core.Variable, len(params.GetVariables()))
paramSet := sets.NewString()
- for paramName, param := range params.Variables {
+ for paramName, param := range params.GetVariables() {
paramMap[paramName] = param
paramSet.Insert(paramName)
}
@@ -130,7 +130,7 @@ func filterVariables(vars *core.VariableMap, varNames sets.String) *core.Variabl
Variables: make(map[string]*core.Variable, len(varNames)),
}
- for paramName, param := range vars.Variables {
+ for paramName, param := range vars.GetVariables() {
if varNames.Has(paramName) {
res.Variables[paramName] = param
}
@@ -158,9 +158,9 @@ func UnionDistinctVariableMaps(m1, m2 map[string]*core.Variable) (map[string]*co
for k, v := range m2 {
if existingV, exists := res[k]; exists {
- if v.Type.String() != existingV.Type.String() {
+ if v.GetType().String() != existingV.GetType().String() {
return nil, fmt.Errorf("key already exists with a different type. %v has type [%v] on one side "+
- "and type [%v] on the other", k, existingV.Type.String(), v.Type.String())
+ "and type [%v] on the other", k, existingV.GetType().String(), v.GetType().String())
}
}
@@ -178,7 +178,7 @@ func buildMultipleTypeUnion(innerType []*core.LiteralType) *core.LiteralType {
unionType := x.GetCollectionType().GetUnionType()
if unionType != nil {
isNested = true
- variants = append(variants, unionType.Variants...)
+ variants = append(variants, unionType.GetVariants()...)
} else {
variants = append(variants, x)
}
@@ -279,7 +279,7 @@ func LiteralTypeForLiteral(l *core.Literal) *core.LiteralType {
case *core.Literal_Scalar:
return literalTypeForScalar(l.GetScalar())
case *core.Literal_Collection:
- collectionType, isOffloaded := literalTypeForLiterals(l.GetCollection().Literals)
+ collectionType, isOffloaded := literalTypeForLiterals(l.GetCollection().GetLiterals())
if isOffloaded {
return collectionType
}
@@ -289,7 +289,7 @@ func LiteralTypeForLiteral(l *core.Literal) *core.LiteralType {
},
}
case *core.Literal_Map:
- mapValueType, isOffloaded := literalTypeForLiterals(maps.Values(l.GetMap().Literals))
+ mapValueType, isOffloaded := literalTypeForLiterals(maps.Values(l.GetMap().GetLiterals()))
if isOffloaded {
return mapValueType
}
diff --git a/flytepropeller/pkg/compiler/validators/utils_test.go b/flytepropeller/pkg/compiler/validators/utils_test.go
index dd32a98a53..41a1333e62 100644
--- a/flytepropeller/pkg/compiler/validators/utils_test.go
+++ b/flytepropeller/pkg/compiler/validators/utils_test.go
@@ -114,9 +114,9 @@ func TestLiteralTypeForLiterals(t *testing.T) {
coreutils.MustMakeLiteral(2),
})
- assert.Len(t, lt.GetUnionType().Variants, 2)
- assert.Equal(t, core.SimpleType_INTEGER.String(), lt.GetUnionType().Variants[0].GetSimple().String())
- assert.Equal(t, core.SimpleType_STRING.String(), lt.GetUnionType().Variants[1].GetSimple().String())
+ assert.Len(t, lt.GetUnionType().GetVariants(), 2)
+ assert.Equal(t, core.SimpleType_INTEGER.String(), lt.GetUnionType().GetVariants()[0].GetSimple().String())
+ assert.Equal(t, core.SimpleType_STRING.String(), lt.GetUnionType().GetVariants()[1].GetSimple().String())
assert.False(t, isOffloaded)
})
@@ -128,9 +128,9 @@ func TestLiteralTypeForLiterals(t *testing.T) {
coreutils.MustMakeLiteral(2),
})
- assert.Len(t, lt.GetUnionType().Variants, 2)
- assert.Equal(t, core.SimpleType_INTEGER.String(), lt.GetUnionType().Variants[0].GetSimple().String())
- assert.Equal(t, core.SimpleType_STRING.String(), lt.GetUnionType().Variants[1].GetSimple().String())
+ assert.Len(t, lt.GetUnionType().GetVariants(), 2)
+ assert.Equal(t, core.SimpleType_INTEGER.String(), lt.GetUnionType().GetVariants()[0].GetSimple().String())
+ assert.Equal(t, core.SimpleType_STRING.String(), lt.GetUnionType().GetVariants()[1].GetSimple().String())
assert.False(t, isOffloaded)
})
diff --git a/flytepropeller/pkg/compiler/validators/vars.go b/flytepropeller/pkg/compiler/validators/vars.go
index e114dc4fc0..445dd258bd 100644
--- a/flytepropeller/pkg/compiler/validators/vars.go
+++ b/flytepropeller/pkg/compiler/validators/vars.go
@@ -48,12 +48,12 @@ func validateVarType(nodeID c.NodeID, paramName string, param *flyte.Variable,
// Validate parameters have their required attributes set
func validateVariables(nodeID c.NodeID, params *flyte.VariableMap, errs errors.CompileErrors) {
- for paramName, param := range params.Variables {
+ for paramName, param := range params.GetVariables() {
if len(paramName) == 0 {
errs.Collect(errors.NewValueRequiredErr(nodeID, "paramName"))
}
- if param.Type == nil {
+ if param.GetType() == nil {
errs.Collect(errors.NewValueRequiredErr(nodeID, "param.Type"))
}
}
diff --git a/flytepropeller/pkg/compiler/workflow_compiler.go b/flytepropeller/pkg/compiler/workflow_compiler.go
index 89e82ebd16..2cd5e9a65d 100644
--- a/flytepropeller/pkg/compiler/workflow_compiler.go
+++ b/flytepropeller/pkg/compiler/workflow_compiler.go
@@ -46,7 +46,7 @@ import (
// Updates workflows and tasks references to reflect the needed ones for this workflow (ignoring subworkflows)
func (w *workflowBuilder) updateRequiredReferences() {
- reqs := getRequirements(w.CoreWorkflow.Template, w.allSubWorkflows, false, errors.NewCompileErrors())
+ reqs := getRequirements(w.GetCoreWorkflow().GetTemplate(), w.allSubWorkflows, false, errors.NewCompileErrors())
workflows := map[c.WorkflowIDKey]c.InterfaceProvider{}
tasks := c.TaskIndex{}
for _, workflowID := range reqs.launchPlanIds {
@@ -167,8 +167,8 @@ func (w workflowBuilder) AddEdges(n c.NodeBuilder, edgeDirection c.EdgeDirection
// Contains the main validation logic for the coreWorkflow. If successful, it'll build an executable Workflow.
func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.CompileErrors) (c.Workflow, bool) {
- if len(fg.Template.Nodes) == 0 {
- errs.Collect(errors.NewNoNodesFoundErr(fg.Template.Id.String()))
+ if len(fg.GetTemplate().GetNodes()) == 0 {
+ errs.Collect(errors.NewNoNodesFoundErr(fg.GetTemplate().GetId().String()))
return nil, !errs.HasErrors()
}
@@ -183,25 +183,25 @@ func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.Compile
}
var ok bool
- if wf.CoreWorkflow.Template.Interface, ok = v.ValidateInterface(c.StartNodeID, wf.CoreWorkflow.Template.Interface, errs.NewScope()); !ok {
+ if wf.CoreWorkflow.Template.Interface, ok = v.ValidateInterface(c.StartNodeID, wf.GetCoreWorkflow().GetTemplate().GetInterface(), errs.NewScope()); !ok {
return nil, !errs.HasErrors()
}
- checkpoint := make([]*core.Node, 0, len(fg.Template.Nodes))
- checkpoint = append(checkpoint, fg.Template.Nodes...)
- fg.Template.Nodes = make([]*core.Node, 0, len(fg.Template.Nodes))
+ checkpoint := make([]*core.Node, 0, len(fg.GetTemplate().GetNodes()))
+ checkpoint = append(checkpoint, fg.GetTemplate().GetNodes()...)
+ fg.Template.Nodes = make([]*core.Node, 0, len(fg.GetTemplate().GetNodes()))
wf.GetCoreWorkflow().Connections = &core.ConnectionSet{
Downstream: make(map[string]*core.ConnectionSet_IdList),
Upstream: make(map[string]*core.ConnectionSet_IdList),
}
globalInputNode, _ := wf.AddNode(wf.GetOrCreateNodeBuilder(startNode), errs)
- globalInputNode.SetInterface(&core.TypedInterface{Outputs: wf.CoreWorkflow.Template.Interface.Inputs})
+ globalInputNode.SetInterface(&core.TypedInterface{Outputs: wf.GetCoreWorkflow().GetTemplate().GetInterface().GetInputs()})
endNode := &core.Node{Id: c.EndNodeID}
globalOutputNode, _ := wf.AddNode(wf.GetOrCreateNodeBuilder(endNode), errs)
- globalOutputNode.SetInterface(&core.TypedInterface{Inputs: wf.CoreWorkflow.Template.Interface.Outputs})
- globalOutputNode.SetInputs(wf.CoreWorkflow.Template.Outputs)
+ globalOutputNode.SetInterface(&core.TypedInterface{Inputs: wf.GetCoreWorkflow().GetTemplate().GetInterface().GetOutputs()})
+ globalOutputNode.SetInputs(wf.GetCoreWorkflow().GetTemplate().GetOutputs())
// Track top level nodes (a branch in a branch node is NOT a top level node). The final graph should ensure that all
// top level nodes are executed before the end node. We do that by adding execution edges from leaf nodes that do not
@@ -210,7 +210,7 @@ func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.Compile
// Add and validate all other nodes
for _, n := range checkpoint {
- topLevelNodes.Insert(n.Id)
+ topLevelNodes.Insert(n.GetId())
if node, addOk := wf.AddNode(wf.GetOrCreateNodeBuilder(n), errs.NewScope()); addOk {
v.ValidateNode(&wf, node, false /* validateConditionTypes */, errs.NewScope())
}
@@ -225,8 +225,8 @@ func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.Compile
wf.AddEdges(n, c.EdgeDirectionBidirectional, errs.NewScope())
}
- if fg.Template.FailureNode != nil {
- failureNode := fg.Template.FailureNode
+ if fg.GetTemplate().GetFailureNode() != nil {
+ failureNode := fg.GetTemplate().GetFailureNode()
v.ValidateNode(&wf, wf.GetOrCreateNodeBuilder(failureNode), false, errs.NewScope())
wf.AddEdges(wf.GetOrCreateNodeBuilder(failureNode), c.EdgeDirectionUpstream, errs.NewScope())
}
@@ -272,7 +272,7 @@ func (w workflowBuilder) ValidateWorkflow(fg *flyteWorkflow, errs errors.Compile
// Validates that all requirements for the coreWorkflow and its subworkflows are present.
func (w workflowBuilder) validateAllRequirements(errs errors.CompileErrors) bool {
- reqs := getRequirements(w.CoreWorkflow.Template, w.allSubWorkflows, true, errs)
+ reqs := getRequirements(w.GetCoreWorkflow().GetTemplate(), w.allSubWorkflows, true, errs)
for _, lp := range reqs.launchPlanIds {
if _, ok := w.allLaunchPlans[lp.String()]; !ok {
@@ -314,17 +314,17 @@ func CompileWorkflow(primaryWf *core.WorkflowTemplate, subworkflows []*core.Work
uniqueTasks := sets.NewString()
taskBuilders := make([]c.Task, 0, len(tasks))
for _, task := range tasks {
- if task.Template == nil || task.Template.Id == nil {
+ if task.GetTemplate() == nil || task.GetTemplate().GetId() == nil {
errs.Collect(errors.NewValueRequiredErr("task", "Template.Id"))
return nil, errs
}
- if uniqueTasks.Has(task.Template.Id.String()) {
+ if uniqueTasks.Has(task.GetTemplate().GetId().String()) {
continue
}
- taskBuilders = append(taskBuilders, &taskBuilder{flyteTask: task.Template})
- uniqueTasks.Insert(task.Template.Id.String())
+ taskBuilders = append(taskBuilders, &taskBuilder{flyteTask: task.GetTemplate()})
+ uniqueTasks.Insert(task.GetTemplate().GetId().String())
}
// Validate overall requirements of the coreWorkflow.
diff --git a/flytepropeller/pkg/compiler/workflow_compiler_test.go b/flytepropeller/pkg/compiler/workflow_compiler_test.go
index 84d55aa342..8c9cefdc25 100644
--- a/flytepropeller/pkg/compiler/workflow_compiler_test.go
+++ b/flytepropeller/pkg/compiler/workflow_compiler_test.go
@@ -36,7 +36,7 @@ func dumpIdentifierNames(ids []common.Identifier) []string {
res := make([]string, 0, len(ids))
for _, id := range ids {
- res = append(res, id.Name)
+ res = append(res, id.GetName())
}
return res
@@ -98,7 +98,7 @@ func ExampleCompileWorkflow_basic() {
for _, task := range tasks {
compiledTask, err := CompileTask(task)
if err != nil {
- fmt.Printf("failed to compile task [%v]. Error: %v", task.Id, err)
+ fmt.Printf("failed to compile task [%v]. Error: %v", task.GetId(), err)
return
}
@@ -106,7 +106,7 @@ func ExampleCompileWorkflow_basic() {
}
output, errs := CompileWorkflow(inputWorkflow, subWorkflows, compiledTasks, workflows)
- fmt.Printf("Compiled Workflow in GraphViz: %v\n", visualize.ToGraphViz(output.Primary))
+ fmt.Printf("Compiled Workflow in GraphViz: %v\n", visualize.ToGraphViz(output.GetPrimary()))
fmt.Printf("Compile Errors: %v\n", errs)
// Output:
@@ -195,8 +195,8 @@ func TestCompileWorkflowWithFailureNode(t *testing.T) {
}
output, errs := CompileWorkflow(inputWorkflow, subWorkflows, compiledTasks, workflows)
- assert.Equal(t, output.Primary.Template.FailureNode.Id, "FailureNode")
- assert.NotNil(t, output.Primary.Template.FailureNode.GetTaskNode())
+ assert.Equal(t, output.GetPrimary().GetTemplate().GetFailureNode().GetId(), "FailureNode")
+ assert.NotNil(t, output.GetPrimary().GetTemplate().GetFailureNode().GetTaskNode())
assert.Nil(t, errs)
}
@@ -287,7 +287,7 @@ func ExampleCompileWorkflow_inputsOutputsBinding() {
for _, task := range inputTasks {
compiledTask, err := CompileTask(task)
if err != nil {
- fmt.Printf("Failed to compile task [%v]. Error: %v", task.Id, err)
+ fmt.Printf("Failed to compile task [%v]. Error: %v", task.GetId(), err)
return
}
@@ -298,7 +298,7 @@ func ExampleCompileWorkflow_inputsOutputsBinding() {
if errs != nil {
fmt.Printf("Compile Errors: %v\n", errs)
} else {
- fmt.Printf("Compiled Workflow in GraphViz: %v\n", visualize.ToGraphViz(output.Primary))
+ fmt.Printf("Compiled Workflow in GraphViz: %v\n", visualize.ToGraphViz(output.GetPrimary()))
}
// Output:
@@ -575,7 +575,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) {
parentT.Run("TaskNode", func(t *testing.T) {
errs := errors.NewCompileErrors()
- iface, ifaceOk := v.ValidateUnderlyingInterface(&g, &nodeBuilder{flyteNode: inputWorkflow.Nodes[0]}, errs)
+ iface, ifaceOk := v.ValidateUnderlyingInterface(&g, &nodeBuilder{flyteNode: inputWorkflow.GetNodes()[0]}, errs)
assert.True(t, ifaceOk)
assert.False(t, errs.HasErrors())
assert.Equal(t, taskIface, iface)
@@ -587,7 +587,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) {
Target: &core.Node_WorkflowNode{
WorkflowNode: &core.WorkflowNode{
Reference: &core.WorkflowNode_SubWorkflowRef{
- SubWorkflowRef: inputWorkflow.Id,
+ SubWorkflowRef: inputWorkflow.GetId(),
},
},
},
@@ -605,7 +605,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) {
BranchNode: &core.BranchNode{
IfElse: &core.IfElseBlock{
Case: &core.IfBlock{
- ThenNode: inputWorkflow.Nodes[0],
+ ThenNode: inputWorkflow.GetNodes()[0],
},
},
},
@@ -613,7 +613,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) {
}}, errs)
assert.True(t, ifaceOk)
assert.False(t, errs.HasErrors())
- assert.Equal(t, taskIface.Outputs, iface.Outputs)
+ assert.Equal(t, taskIface.GetOutputs(), iface.GetOutputs())
})
branchT.Run("TwoCases", func(t *testing.T) {
@@ -623,7 +623,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) {
BranchNode: &core.BranchNode{
IfElse: &core.IfElseBlock{
Case: &core.IfBlock{
- ThenNode: inputWorkflow.Nodes[0],
+ ThenNode: inputWorkflow.GetNodes()[0],
},
Other: []*core.IfBlock{
{
@@ -631,7 +631,7 @@ func TestValidateUnderlyingInterface(parentT *testing.T) {
Target: &core.Node_WorkflowNode{
WorkflowNode: &core.WorkflowNode{
Reference: &core.WorkflowNode_SubWorkflowRef{
- SubWorkflowRef: inputWorkflow.Id,
+ SubWorkflowRef: inputWorkflow.GetId(),
},
},
},
@@ -720,9 +720,9 @@ func TestCompileWorkflow(t *testing.T) {
assert.NoError(t, errs)
assert.NotNil(t, output)
if output != nil {
- t.Logf("Graph Repr: %v", visualize.ToGraphViz(output.Primary))
+ t.Logf("Graph Repr: %v", visualize.ToGraphViz(output.GetPrimary()))
- assert.Equal(t, []string{"node_123"}, output.Primary.Connections.Upstream["node_456"].Ids)
+ assert.Equal(t, []string{"node_123"}, output.GetPrimary().GetConnections().GetUpstream()["node_456"].GetIds())
}
}
diff --git a/flytepropeller/pkg/controller/handler.go b/flytepropeller/pkg/controller/handler.go
index 49c2c21549..3e9f7526fc 100644
--- a/flytepropeller/pkg/controller/handler.go
+++ b/flytepropeller/pkg/controller/handler.go
@@ -102,7 +102,7 @@ func (p *Propeller) TryMutateWorkflow(ctx context.Context, originalW *v1alpha1.F
}
ctx = contextutils.WithResourceVersion(ctx, mutableW.GetResourceVersion())
- maxRetries := uint32(p.cfg.MaxWorkflowRetries)
+ maxRetries := uint32(p.cfg.MaxWorkflowRetries) // #nosec G115
if IsDeleted(mutableW) || (mutableW.Status.FailedAttempts > maxRetries) {
var err error
func() {
@@ -267,7 +267,7 @@ func (p *Propeller) parseWorkflowClosureCrdFields(ctx context.Context, dataRefer
return nil, err
}
- wfClosureCrdFields, err := k8s.BuildWfClosureCrdFields(wfClosure.CompiledWorkflow)
+ wfClosureCrdFields, err := k8s.BuildWfClosureCrdFields(wfClosure.GetCompiledWorkflow())
if err != nil {
logger.Errorf(ctx, "Failed to parse workflow closure data from '%s' with error '%s'", dataReference, err)
return nil, err
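The `// #nosec G115` annotations added here (and throughout the array handlers below) suppress gosec's integer-overflow-in-conversion finding on narrowing casts such as `uint32(...)`, where the surrounding code already bounds the value. Where an explicit guard is preferred over an annotation, the pattern looks roughly like this (hypothetical helper, not part of this change):

```go
package main

import (
	"fmt"
	"math"
)

// safeUint32 narrows an int to uint32 only after checking the range,
// which is the lint-clean alternative to annotating the cast.
func safeUint32(i int) (uint32, error) {
	if i < 0 || uint64(i) > math.MaxUint32 {
		return 0, fmt.Errorf("value %d does not fit in uint32", i)
	}
	return uint32(i), nil // #nosec G115 -- range checked above
}

func main() {
	v, err := safeUint32(42)
	fmt.Println(v, err) // 42 <nil>
}
```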
diff --git a/flytepropeller/pkg/controller/nodes/array/event_recorder.go b/flytepropeller/pkg/controller/nodes/array/event_recorder.go
index 999b383f39..8d14bd1d43 100644
--- a/flytepropeller/pkg/controller/nodes/array/event_recorder.go
+++ b/flytepropeller/pkg/controller/nodes/array/event_recorder.go
@@ -93,10 +93,10 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter
// process events
cacheStatus := idlcore.CatalogCacheStatus_CACHE_DISABLED
for _, nodeExecutionEvent := range e.nodeEvents {
- switch target := nodeExecutionEvent.TargetMetadata.(type) {
+ switch target := nodeExecutionEvent.GetTargetMetadata().(type) {
case *event.NodeExecutionEvent_TaskNodeMetadata:
if target.TaskNodeMetadata != nil {
- cacheStatus = target.TaskNodeMetadata.CacheStatus
+ cacheStatus = target.TaskNodeMetadata.GetCacheStatus()
}
}
}
@@ -106,7 +106,7 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter
if cacheStatus == idlcore.CatalogCacheStatus_CACHE_HIT && len(e.taskEvents) == 0 {
e.externalResources = append(e.externalResources, &event.ExternalResourceInfo{
ExternalId: externalResourceID,
- Index: uint32(index),
+ Index: uint32(index), // #nosec G115
RetryAttempt: retryAttempt,
Phase: idlcore.TaskExecution_SUCCEEDED,
CacheStatus: cacheStatus,
@@ -122,7 +122,7 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter
}
for _, taskExecutionEvent := range e.taskEvents {
- if mapLogPlugin != nil && len(taskExecutionEvent.Logs) > 0 {
+ if mapLogPlugin != nil && len(taskExecutionEvent.GetLogs()) > 0 {
// override log links for subNode execution with map plugin
logs, err := getPluginLogs(mapLogPlugin, nCtx, index, retryAttempt)
if err != nil {
@@ -132,16 +132,16 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter
}
}
- for _, log := range taskExecutionEvent.Logs {
- log.Name = fmt.Sprintf("%s-%d", log.Name, index)
+ for _, log := range taskExecutionEvent.GetLogs() {
+ log.Name = fmt.Sprintf("%s-%d", log.GetName(), index)
}
e.externalResources = append(e.externalResources, &event.ExternalResourceInfo{
ExternalId: externalResourceID,
- Index: uint32(index),
- Logs: taskExecutionEvent.Logs,
+ Index: uint32(index), // #nosec G115
+ Logs: taskExecutionEvent.GetLogs(),
RetryAttempt: retryAttempt,
- Phase: taskExecutionEvent.Phase,
+ Phase: taskExecutionEvent.GetPhase(),
CacheStatus: cacheStatus,
})
}
@@ -175,7 +175,7 @@ func (e *externalResourcesEventRecorder) finalize(ctx context.Context, nCtx inte
nodeExecutionID := *nCtx.NodeExecutionMetadata().GetNodeExecutionID()
if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 {
- currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nodeExecutionID.NodeId)
+ currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nodeExecutionID.GetNodeId())
if err != nil {
return err
}
@@ -315,7 +315,7 @@ func getPluginLogs(logPlugin tasklog.Plugin, nCtx interfaces.NodeExecutionContex
extraLogTemplateVars := []tasklog.TemplateVar{
{
Regex: mapplugin.LogTemplateRegexes.ExecutionIndex,
- Value: strconv.FormatUint(uint64(index), 10),
+ Value: strconv.FormatUint(uint64(index), 10), // #nosec G115
},
{
Regex: mapplugin.LogTemplateRegexes.RetryAttempt,
@@ -374,12 +374,12 @@ func sendEvents(ctx context.Context, nCtx interfaces.NodeExecutionContext, index
taskExecutionEvent := &event.TaskExecutionEvent{
TaskId: &idlcore.Identifier{
ResourceType: idlcore.ResourceType_TASK,
- Project: workflowExecutionID.Project,
- Domain: workflowExecutionID.Domain,
+ Project: workflowExecutionID.GetProject(),
+ Domain: workflowExecutionID.GetDomain(),
Name: fmt.Sprintf("%s-%d", buildSubNodeID(nCtx, index), retryAttempt),
Version: "v1", // this value is irrelevant but necessary for the identifier to be valid
},
- ParentNodeExecutionId: nodeExecutionEvent.Id,
+ ParentNodeExecutionId: nodeExecutionEvent.GetId(),
Phase: taskPhase,
TaskType: "k8s-array",
OccurredAt: timestamp,
diff --git a/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go b/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go
index 64fbff7666..5e418d3fc8 100644
--- a/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go
+++ b/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go
@@ -101,6 +101,6 @@ func TestGetPluginLogs(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, len(logConfig.Templates), len(logs))
- assert.Equal(t, "bar", logs[0].Name)
- assert.Equal(t, "/console/projects/node_project/domains/node_domain/executions/node_name/nodeId/foo/taskId/task_name/attempt/0/mappedIndex/1/mappedAttempt/0/view/logs?duration=all", logs[0].Uri)
+ assert.Equal(t, "bar", logs[0].GetName())
+ assert.Equal(t, "/console/projects/node_project/domains/node_domain/executions/node_name/nodeId/foo/taskId/task_name/attempt/0/mappedIndex/1/mappedAttempt/0/view/logs?duration=all", logs[0].GetUri())
}
diff --git a/flytepropeller/pkg/controller/nodes/array/handler.go b/flytepropeller/pkg/controller/nodes/array/handler.go
index 7495c77e16..17f49adcd3 100644
--- a/flytepropeller/pkg/controller/nodes/array/handler.go
+++ b/flytepropeller/pkg/controller/nodes/array/handler.go
@@ -76,7 +76,7 @@ func (a *arrayNodeHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecut
switch arrayNodeState.Phase {
case v1alpha1.ArrayNodePhaseExecuting, v1alpha1.ArrayNodePhaseFailing:
for i, nodePhaseUint64 := range arrayNodeState.SubNodePhases.GetItems() {
- nodePhase := v1alpha1.NodePhase(nodePhaseUint64)
+ nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115
// do not process nodes that have not started or are in a terminal state
if nodePhase == v1alpha1.NodePhaseNotYetStarted || isTerminalNodePhase(nodePhase) {
@@ -96,7 +96,7 @@ func (a *arrayNodeHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecut
messageCollector.Collect(i, err.Error())
} else {
// record events transitioning subNodes to aborted
- retryAttempt := uint32(arrayNodeState.SubNodeRetryAttempts.GetItem(i))
+ retryAttempt := uint32(arrayNodeState.SubNodeRetryAttempts.GetItem(i)) // #nosec G115
if err := sendEvents(ctx, nCtx, i, retryAttempt, idlcore.NodeExecution_ABORTED, idlcore.TaskExecution_ABORTED, eventRecorder, a.eventConfig); err != nil {
logger.Warnf(ctx, "failed to record ArrayNode events: %v", err)
@@ -110,7 +110,7 @@ func (a *arrayNodeHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecut
}
if messageCollector.Length() > 0 {
- return fmt.Errorf(messageCollector.Summary(events.MaxErrorMessageLength))
+ return fmt.Errorf(messageCollector.Summary(events.MaxErrorMessageLength)) //nolint:govet,staticcheck
}
// update state for subNodes
@@ -136,7 +136,7 @@ func (a *arrayNodeHandler) Finalize(ctx context.Context, nCtx interfaces.NodeExe
switch arrayNodeState.Phase {
case v1alpha1.ArrayNodePhaseExecuting, v1alpha1.ArrayNodePhaseFailing, v1alpha1.ArrayNodePhaseSucceeding:
for i, nodePhaseUint64 := range arrayNodeState.SubNodePhases.GetItems() {
- nodePhase := v1alpha1.NodePhase(nodePhaseUint64)
+ nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115
// do not process nodes that have not started or are in a terminal state
if nodePhase == v1alpha1.NodePhaseNotYetStarted || isTerminalNodePhase(nodePhase) {
@@ -159,7 +159,7 @@ func (a *arrayNodeHandler) Finalize(ctx context.Context, nCtx interfaces.NodeExe
}
if messageCollector.Length() > 0 {
- return fmt.Errorf(messageCollector.Summary(events.MaxErrorMessageLength))
+ return fmt.Errorf(messageCollector.Summary(events.MaxErrorMessageLength)) //nolint:govet,staticcheck
}
return nil
@@ -192,7 +192,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
size := -1
- for key, variable := range literalMap.Literals {
+ for key, variable := range literalMap.GetLiterals() {
literalType := validators.LiteralTypeForLiteral(variable)
err := validators.ValidateLiteralType(literalType)
if err != nil {
@@ -211,9 +211,9 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
), nil
}
}
- switch literalType.Type.(type) {
+ switch literalType.GetType().(type) {
case *idlcore.LiteralType_CollectionType:
- collectionLength := len(variable.GetCollection().Literals)
+ collectionLength := len(variable.GetCollection().GetLiterals())
if size == -1 {
size = collectionLength
} else if size != collectionLength {
@@ -256,7 +256,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
{arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: maxSystemFailuresValue},
} {
- *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue))
+ *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) // #nosec G115
if err != nil {
return handler.UnknownTransition, err
}
@@ -288,8 +288,8 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
break
}
- nodePhase := v1alpha1.NodePhase(nodePhaseUint64)
- taskPhase := int(arrayNodeState.SubNodeTaskPhases.GetItem(i))
+ nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115
+ taskPhase := int(arrayNodeState.SubNodeTaskPhases.GetItem(i)) // #nosec G115
// do not process nodes in terminal state
if isTerminalNodePhase(nodePhase) {
@@ -370,12 +370,12 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
}
// update subNode state
- arrayNodeState.SubNodePhases.SetItem(index, uint64(subNodeStatus.GetPhase()))
+ arrayNodeState.SubNodePhases.SetItem(index, uint64(subNodeStatus.GetPhase())) // #nosec G115
if subNodeStatus.GetTaskNodeStatus() == nil {
// resetting task phase because during retries we clear the GetTaskNodeStatus
arrayNodeState.SubNodeTaskPhases.SetItem(index, uint64(0))
} else {
- arrayNodeState.SubNodeTaskPhases.SetItem(index, uint64(subNodeStatus.GetTaskNodeStatus().GetPhase()))
+ arrayNodeState.SubNodeTaskPhases.SetItem(index, uint64(subNodeStatus.GetTaskNodeStatus().GetPhase())) // #nosec G115
}
arrayNodeState.SubNodeRetryAttempts.SetItem(index, uint64(subNodeStatus.GetAttempts()))
arrayNodeState.SubNodeSystemFailures.SetItem(index, uint64(subNodeStatus.GetSystemFailures()))
@@ -397,7 +397,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
failingCount := 0
runningCount := 0
for _, nodePhaseUint64 := range arrayNodeState.SubNodePhases.GetItems() {
- nodePhase := v1alpha1.NodePhase(nodePhaseUint64)
+ nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115
switch nodePhase {
case v1alpha1.NodePhaseSucceeded, v1alpha1.NodePhaseRecovered, v1alpha1.NodePhaseSkipped:
successCount++
@@ -457,7 +457,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
case v1alpha1.ArrayNodePhaseSucceeding:
gatherOutputsRequests := make([]*gatherOutputsRequest, 0, len(arrayNodeState.SubNodePhases.GetItems()))
for i, nodePhaseUint64 := range arrayNodeState.SubNodePhases.GetItems() {
- nodePhase := v1alpha1.NodePhase(nodePhaseUint64)
+ nodePhase := v1alpha1.NodePhase(nodePhaseUint64) // #nosec G115
gatherOutputsRequest := &gatherOutputsRequest{
ctx: ctx,
responseChannel: make(chan struct {
@@ -479,8 +479,8 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
continue
}
- if task.CoreTask() != nil && task.CoreTask().Interface != nil && task.CoreTask().Interface.Outputs != nil {
- for name := range task.CoreTask().Interface.Outputs.Variables {
+ if task.CoreTask() != nil && task.CoreTask().GetInterface() != nil && task.CoreTask().GetInterface().GetOutputs() != nil {
+ for name := range task.CoreTask().GetInterface().GetOutputs().GetVariables() {
outputLiterals[name] = nilLiteral
}
}
@@ -491,7 +491,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
}{outputLiterals, nil}
} else {
// initialize subNode reader
- currentAttempt := int(arrayNodeState.SubNodeRetryAttempts.GetItem(i))
+ currentAttempt := int(arrayNodeState.SubNodeRetryAttempts.GetItem(i)) // #nosec G115
subDataDir, subOutputDir, err := constructOutputReferences(ctx, nCtx,
strconv.Itoa(i), strconv.Itoa(currentAttempt))
if err != nil {
@@ -527,7 +527,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu
}
if outputs := taskNode.CoreTask().GetInterface().GetOutputs(); outputs != nil {
- for name := range outputs.Variables {
+ for name := range outputs.GetVariables() {
outputLiteral := &idlcore.Literal{
Value: &idlcore.Literal_Collection{
Collection: &idlcore.LiteralCollection{
@@ -724,8 +724,8 @@ func New(nodeExecutor interfaces.Node, eventConfig *config.EventConfig, literalO
func (a *arrayNodeHandler) buildArrayNodeContext(ctx context.Context, nCtx interfaces.NodeExecutionContext, arrayNodeState *handler.ArrayNodeState, arrayNode v1alpha1.ExecutableArrayNode, subNodeIndex int, eventRecorder arrayEventRecorder) (
interfaces.Node, executors.ExecutionContext, executors.DAGStructure, executors.NodeLookup, *v1alpha1.NodeSpec, *v1alpha1.NodeStatus, error) {
- nodePhase := v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(subNodeIndex))
- taskPhase := int(arrayNodeState.SubNodeTaskPhases.GetItem(subNodeIndex))
+ nodePhase := v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(subNodeIndex)) // #nosec G115
+ taskPhase := int(arrayNodeState.SubNodeTaskPhases.GetItem(subNodeIndex)) // #nosec G115
// need to initialize the inputReader every time to ensure TaskHandler can access for cache lookups / population
inputs, err := nCtx.InputReader().Get(ctx)
@@ -761,7 +761,7 @@ func (a *arrayNodeHandler) buildArrayNodeContext(ctx context.Context, nCtx inter
}
// construct output references
- currentAttempt := uint32(arrayNodeState.SubNodeRetryAttempts.GetItem(subNodeIndex))
+ currentAttempt := uint32(arrayNodeState.SubNodeRetryAttempts.GetItem(subNodeIndex)) // #nosec G115
subDataDir, subOutputDir, err := constructOutputReferences(ctx, nCtx, strconv.Itoa(subNodeIndex), strconv.Itoa(int(currentAttempt)))
if err != nil {
return nil, nil, nil, nil, nil, nil, err
@@ -772,7 +772,7 @@ func (a *arrayNodeHandler) buildArrayNodeContext(ctx context.Context, nCtx inter
DataDir: subDataDir,
OutputDir: subOutputDir,
Attempts: currentAttempt,
- SystemFailures: uint32(arrayNodeState.SubNodeSystemFailures.GetItem(subNodeIndex)),
+ SystemFailures: uint32(arrayNodeState.SubNodeSystemFailures.GetItem(subNodeIndex)), // #nosec G115
TaskNodeStatus: &v1alpha1.TaskNodeStatus{
Phase: taskPhase,
PluginState: pluginStateBytes,
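The `//nolint:govet,staticcheck` directives added in this file acknowledge that `fmt.Errorf` is being called with a non-constant format string (the collected error summary), which the printf analyzers flag because a stray `%` in the message would be interpreted as a formatting verb. A tiny sketch of the warning and its lint-clean alternatives, using a placeholder summary string:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	summary := "3 subnodes failed: 50% quota exceeded" // placeholder for messageCollector.Summary(...)

	// Non-constant format string: the "%" would be treated as a verb.
	_ = fmt.Errorf(summary) //nolint:govet,staticcheck

	// Lint-clean alternatives that treat the summary as plain text.
	_ = errors.New(summary)
	_ = fmt.Errorf("%s", summary)
}
```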
diff --git a/flytepropeller/pkg/controller/nodes/array/handler_test.go b/flytepropeller/pkg/controller/nodes/array/handler_test.go
index 08eea22e09..a759327423 100644
--- a/flytepropeller/pkg/controller/nodes/array/handler_test.go
+++ b/flytepropeller/pkg/controller/nodes/array/handler_test.go
@@ -254,15 +254,15 @@ func TestAbort(t *testing.T) {
{arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: 1},
} {
- *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue))
+ *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) // #nosec G115
assert.NoError(t, err)
}
for i, nodePhase := range test.subNodePhases {
- arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase))
+ arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115
}
for i, taskPhase := range test.subNodeTaskPhases {
- arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase))
+ arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase)) // #nosec G115
}
// create NodeExecutionContext
@@ -277,10 +277,10 @@ func TestAbort(t *testing.T) {
if len(test.expectedExternalResourcePhases) > 0 {
assert.Equal(t, 1, len(eventRecorder.taskExecutionEvents))
- externalResources := eventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources()
+ externalResources := eventRecorder.taskExecutionEvents[0].GetMetadata().GetExternalResources()
assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources))
for i, expectedPhase := range test.expectedExternalResourcePhases {
- assert.Equal(t, expectedPhase, externalResources[i].Phase)
+ assert.Equal(t, expectedPhase, externalResources[i].GetPhase())
}
} else {
assert.Equal(t, 0, len(eventRecorder.taskExecutionEvents))
@@ -349,16 +349,15 @@ func TestFinalize(t *testing.T) {
{arrayReference: &arrayNodeState.SubNodeRetryAttempts, maxValue: 1},
{arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: 1},
} {
-
- *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue))
+ *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) // #nosec G115
assert.NoError(t, err)
}
for i, nodePhase := range test.subNodePhases {
- arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase))
+ arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115
}
for i, taskPhase := range test.subNodeTaskPhases {
- arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase))
+ arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase)) // #nosec G115
}
// create NodeExecutionContext
@@ -447,10 +446,10 @@ func TestHandleArrayNodePhaseNone(t *testing.T) {
if len(test.expectedExternalResourcePhases) > 0 {
assert.Equal(t, 1, len(eventRecorder.taskExecutionEvents))
- externalResources := eventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources()
+ externalResources := eventRecorder.taskExecutionEvents[0].GetMetadata().GetExternalResources()
assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources))
for i, expectedPhase := range test.expectedExternalResourcePhases {
- assert.Equal(t, expectedPhase, externalResources[i].Phase)
+ assert.Equal(t, expectedPhase, externalResources[i].GetPhase())
}
} else {
assert.Equal(t, 0, len(eventRecorder.taskExecutionEvents))
@@ -475,7 +474,7 @@ func (f *fakeEventRecorder) RecordNodeEvent(ctx context.Context, event *event.No
func (f *fakeEventRecorder) RecordTaskEvent(ctx context.Context, event *event.TaskExecutionEvent, eventConfig *config.EventConfig) error {
f.recordTaskEventCallCount++
- if f.phaseVersionFailures == 0 || event.PhaseVersion < f.phaseVersionFailures {
+ if f.phaseVersionFailures == 0 || event.GetPhaseVersion() < f.phaseVersionFailures {
return f.taskErr
}
return nil
@@ -860,13 +859,12 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) {
{arrayReference: &arrayNodeState.SubNodeRetryAttempts, maxValue: 1},
{arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: 1},
} {
-
- *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue))
+ *item.arrayReference, err = bitarray.NewCompactArray(uint(size), bitarray.Item(item.maxValue)) // #nosec G115
assert.NoError(t, err)
}
for i, nodePhase := range test.subNodePhases {
- arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase))
+ arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115
}
nodeSpec := arrayNodeSpec
@@ -921,7 +919,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) {
assert.Equal(t, test.expectedTaskPhaseVersion, arrayNodeState.TaskPhaseVersion)
for i, expectedPhase := range test.expectedArrayNodeSubPhases {
- assert.Equal(t, expectedPhase, v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(i)))
+ assert.Equal(t, expectedPhase, v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(i))) // #nosec G115
}
bufferedEventRecorder, ok := eventRecorder.(*bufferedEventRecorder)
@@ -929,10 +927,10 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) {
if len(test.expectedExternalResourcePhases) > 0 {
assert.Equal(t, 1, len(bufferedEventRecorder.taskExecutionEvents))
- externalResources := bufferedEventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources()
+ externalResources := bufferedEventRecorder.taskExecutionEvents[0].GetMetadata().GetExternalResources()
assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources))
for i, expectedPhase := range test.expectedExternalResourcePhases {
- assert.Equal(t, expectedPhase, externalResources[i].Phase)
+ assert.Equal(t, expectedPhase, externalResources[i].GetPhase())
}
} else {
assert.Equal(t, 0, len(bufferedEventRecorder.taskExecutionEvents))
@@ -1000,8 +998,8 @@ func TestHandle_InvalidLiteralType(t *testing.T) {
// Validate results
assert.Equal(t, test.expectedTransitionType, transition.Type())
assert.Equal(t, test.expectedPhase, transition.Info().GetPhase())
- assert.Equal(t, test.expectedErrorCode, transition.Info().GetErr().Code)
- assert.Contains(t, transition.Info().GetErr().Message, test.expectedContainedErrorMsg)
+ assert.Equal(t, test.expectedErrorCode, transition.Info().GetErr().GetCode())
+ assert.Contains(t, transition.Info().GetErr().GetMessage(), test.expectedContainedErrorMsg)
})
}
}
@@ -1175,7 +1173,7 @@ func TestHandleArrayNodePhaseSucceeding(t *testing.T) {
subNodePhases, err := bitarray.NewCompactArray(uint(len(test.subNodePhases)), bitarray.Item(v1alpha1.NodePhaseRecovered))
assert.NoError(t, err)
for i, nodePhase := range test.subNodePhases {
- subNodePhases.SetItem(i, bitarray.Item(nodePhase))
+ subNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115
}
retryAttempts, err := bitarray.NewCompactArray(uint(len(test.subNodePhases)), bitarray.Item(1))
@@ -1304,13 +1302,12 @@ func TestHandleArrayNodePhaseFailing(t *testing.T) {
{arrayReference: &arrayNodeState.SubNodeRetryAttempts, maxValue: 1},
{arrayReference: &arrayNodeState.SubNodeSystemFailures, maxValue: 1},
} {
-
- *item.arrayReference, err = bitarray.NewCompactArray(uint(len(test.subNodePhases)), bitarray.Item(item.maxValue))
+ *item.arrayReference, err = bitarray.NewCompactArray(uint(len(test.subNodePhases)), bitarray.Item(item.maxValue)) // #nosec G115
assert.NoError(t, err)
}
for i, nodePhase := range test.subNodePhases {
- arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase))
+ arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) // #nosec G115
}
// create NodeExecutionContext
diff --git a/flytepropeller/pkg/controller/nodes/array/node_execution_context.go b/flytepropeller/pkg/controller/nodes/array/node_execution_context.go
index b2c9619695..d55de708c0 100644
--- a/flytepropeller/pkg/controller/nodes/array/node_execution_context.go
+++ b/flytepropeller/pkg/controller/nodes/array/node_execution_context.go
@@ -29,12 +29,12 @@ func newStaticInputReader(inputPaths io.InputFilePaths, input *core.LiteralMap)
func constructLiteralMap(inputs *core.LiteralMap, index int) (*core.LiteralMap, error) {
literals := make(map[string]*core.Literal)
- for name, literal := range inputs.Literals {
+ for name, literal := range inputs.GetLiterals() {
if literalCollection := literal.GetCollection(); literalCollection != nil {
- if index >= len(literalCollection.Literals) {
+ if index >= len(literalCollection.GetLiterals()) {
return nil, fmt.Errorf("index %v out of bounds for literal collection %v", index, name)
}
- literals[name] = literalCollection.Literals[index]
+ literals[name] = literalCollection.GetLiterals()[index]
} else {
literals[name] = literal
}
@@ -57,12 +57,12 @@ func (a *arrayTaskReader) Read(ctx context.Context) (*core.TaskTemplate, error)
// convert output list variable to singular
outputVariables := make(map[string]*core.Variable)
- for key, value := range originalTaskTemplate.Interface.Outputs.Variables {
- switch v := value.Type.Type.(type) {
+ for key, value := range originalTaskTemplate.GetInterface().GetOutputs().GetVariables() {
+ switch v := value.GetType().GetType().(type) {
case *core.LiteralType_CollectionType:
outputVariables[key] = &core.Variable{
Type: v.CollectionType,
- Description: value.Description,
+ Description: value.GetDescription(),
}
default:
outputVariables[key] = value
@@ -71,7 +71,7 @@ func (a *arrayTaskReader) Read(ctx context.Context) (*core.TaskTemplate, error)
taskTemplate := *originalTaskTemplate
taskTemplate.Interface = &core.TypedInterface{
- Inputs: originalTaskTemplate.Interface.Inputs,
+ Inputs: originalTaskTemplate.GetInterface().GetInputs(),
Outputs: &core.VariableMap{
Variables: outputVariables,
},
diff --git a/flytepropeller/pkg/controller/nodes/array/utils_test.go b/flytepropeller/pkg/controller/nodes/array/utils_test.go
index 2b2c030cd6..eeddd827ea 100644
--- a/flytepropeller/pkg/controller/nodes/array/utils_test.go
+++ b/flytepropeller/pkg/controller/nodes/array/utils_test.go
@@ -29,10 +29,10 @@ func TestAppendLiteral(t *testing.T) {
}
for _, v := range outputLiterals {
- collection, ok := v.Value.(*idlcore.Literal_Collection)
+ collection, ok := v.GetValue().(*idlcore.Literal_Collection)
assert.True(t, ok)
- assert.Equal(t, 2, len(collection.Collection.Literals))
+ assert.Equal(t, 2, len(collection.Collection.GetLiterals()))
}
}
diff --git a/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go b/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go
index e8e28ac08f..f617025ed9 100644
--- a/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go
+++ b/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go
@@ -520,7 +520,7 @@ func TestResolveAttrPathInBinary(t *testing.T) {
collection := make([]any, len(literals))
for i, l := range literals {
var v any
- _ = msgpack.Unmarshal(l.GetScalar().GetBinary().Value, &v)
+ _ = msgpack.Unmarshal(l.GetScalar().GetBinary().GetValue(), &v)
collection[i] = v
}
return collection
@@ -1434,10 +1434,10 @@ func TestResolveAttrPathInBinary(t *testing.T) {
// Helper function to unmarshal a Binary Literal into an any
unmarshalBinaryLiteral := func(literal *core.Literal) (any, error) {
- if scalar, ok := literal.Value.(*core.Literal_Scalar); ok {
- if binary, ok := scalar.Scalar.Value.(*core.Scalar_Binary); ok {
+ if scalar, ok := literal.GetValue().(*core.Literal_Scalar); ok {
+ if binary, ok := scalar.Scalar.GetValue().(*core.Scalar_Binary); ok {
var value any
- err := msgpack.Unmarshal(binary.Binary.Value, &value)
+ err := msgpack.Unmarshal(binary.Binary.GetValue(), &value)
return value, err
}
}
diff --git a/flytepropeller/pkg/controller/nodes/branch/comparator.go b/flytepropeller/pkg/controller/nodes/branch/comparator.go
index cfac3be0af..b52456ea51 100644
--- a/flytepropeller/pkg/controller/nodes/branch/comparator.go
+++ b/flytepropeller/pkg/controller/nodes/branch/comparator.go
@@ -72,14 +72,14 @@ var perTypeComparators = map[string]comparators{
}
func Evaluate(lValue *core.Primitive, rValue *core.Primitive, op core.ComparisonExpression_Operator) (bool, error) {
- lValueType := reflect.TypeOf(lValue.Value)
- rValueType := reflect.TypeOf(rValue.Value)
+ lValueType := reflect.TypeOf(lValue.GetValue())
+ rValueType := reflect.TypeOf(rValue.GetValue())
if lValueType != rValueType {
return false, errors.Errorf(ErrorCodeMalformedBranch, "Comparison between different primitives types. lVal[%v]:rVal[%v]", lValueType, rValueType)
}
comps, ok := perTypeComparators[lValueType.String()]
if !ok {
- return false, errors.Errorf("Comparator not defined for type: [%v]", lValueType.String())
+ return false, errors.Errorf("Comparator not defined for type: [%v]", lValueType.String()) //nolint:govet,staticcheck
}
isBoolean := false
if lValueType.String() == primitiveBooleanType {
diff --git a/flytepropeller/pkg/controller/nodes/branch/evaluator.go b/flytepropeller/pkg/controller/nodes/branch/evaluator.go
index 4bc1676745..c6d717cfe8 100644
--- a/flytepropeller/pkg/controller/nodes/branch/evaluator.go
+++ b/flytepropeller/pkg/controller/nodes/branch/evaluator.go
@@ -27,7 +27,7 @@ func EvaluateComparison(expr *core.ComparisonExpression, nodeInputs *core.Litera
if nodeInputs == nil {
return false, errors.Errorf(ErrorCodeMalformedBranch, "Failed to find Value for Variable [%v]", expr.GetLeftValue().GetVar())
}
- lValue = nodeInputs.Literals[expr.GetLeftValue().GetVar()]
+ lValue = nodeInputs.GetLiterals()[expr.GetLeftValue().GetVar()]
if lValue == nil {
return false, errors.Errorf(ErrorCodeMalformedBranch, "Failed to find Value for Variable [%v]", expr.GetLeftValue().GetVar())
}
@@ -39,7 +39,7 @@ func EvaluateComparison(expr *core.ComparisonExpression, nodeInputs *core.Litera
if nodeInputs == nil {
return false, errors.Errorf(ErrorCodeMalformedBranch, "Failed to find Value for Variable [%v]", expr.GetLeftValue().GetVar())
}
- rValue = nodeInputs.Literals[expr.GetRightValue().GetVar()]
+ rValue = nodeInputs.GetLiterals()[expr.GetRightValue().GetVar()]
if rValue == nil {
return false, errors.Errorf(ErrorCodeMalformedBranch, "Failed to find Value for Variable [%v]", expr.GetRightValue().GetVar())
}
@@ -136,7 +136,7 @@ func DecideBranch(ctx context.Context, nl executors.NodeLookup, nodeID v1alpha1.
if selectedNodeID == nil {
if node.GetElseFail() != nil {
- return nil, errors.Errorf(ErrorCodeUserProvidedError, node.GetElseFail().Message)
+ return nil, errors.Errorf(ErrorCodeUserProvidedError, node.GetElseFail().GetMessage()) //nolint:govet,staticcheck
}
return nil, errors.Errorf(ErrorCodeMalformedBranch, "No branch satisfied")
}
diff --git a/flytepropeller/pkg/controller/nodes/branch/evaluator_test.go b/flytepropeller/pkg/controller/nodes/branch/evaluator_test.go
index dae8a1337b..ae29572a22 100644
--- a/flytepropeller/pkg/controller/nodes/branch/evaluator_test.go
+++ b/flytepropeller/pkg/controller/nodes/branch/evaluator_test.go
@@ -279,7 +279,7 @@ func TestEvaluateBooleanExpression(t *testing.T) {
},
}
- for k, v := range inputs.Literals {
+ for k, v := range inputs.GetLiterals() {
outerInputs.Literals[k] = v
}
diff --git a/flytepropeller/pkg/controller/nodes/branch/handler.go b/flytepropeller/pkg/controller/nodes/branch/handler.go
index 431f5fa3eb..9789b65c22 100644
--- a/flytepropeller/pkg/controller/nodes/branch/handler.go
+++ b/flytepropeller/pkg/controller/nodes/branch/handler.go
@@ -183,7 +183,7 @@ func (b *branchHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecution
// We should never reach here, but for safety and completeness
errMsg := "branch evaluation failed"
if branch.GetElseFail() != nil {
- errMsg = branch.GetElseFail().Message
+ errMsg = branch.GetElseFail().GetMessage()
}
logger.Errorf(ctx, errMsg)
return nil
@@ -227,7 +227,7 @@ func (b *branchHandler) Finalize(ctx context.Context, nCtx interfaces.NodeExecut
// We should never reach here, but for safety and completeness
errMsg := "branch evaluation failed"
if branch.GetElseFail() != nil {
- errMsg = branch.GetElseFail().Message
+ errMsg = branch.GetElseFail().GetMessage()
}
logger.Errorf(ctx, "failed to evaluate branch - user error: %s", errMsg)
return nil
diff --git a/flytepropeller/pkg/controller/nodes/cache.go b/flytepropeller/pkg/controller/nodes/cache.go
index e8e7fc3720..5b1188aa56 100644
--- a/flytepropeller/pkg/controller/nodes/cache.go
+++ b/flytepropeller/pkg/controller/nodes/cache.go
@@ -105,12 +105,12 @@ func (n *nodeExecutor) CheckCatalogCache(ctx context.Context, nCtx interfaces.No
return entry, nil
}
- logger.Infof(ctx, "Catalog CacheHit: for task [%s/%s/%s/%s]", catalogKey.Identifier.Project,
- catalogKey.Identifier.Domain, catalogKey.Identifier.Name, catalogKey.Identifier.Version)
+ logger.Infof(ctx, "Catalog CacheHit: for task [%s/%s/%s/%s]", catalogKey.Identifier.GetProject(),
+ catalogKey.Identifier.GetDomain(), catalogKey.Identifier.GetName(), catalogKey.Identifier.GetVersion())
n.metrics.catalogHitCount.Inc(ctx)
iface := catalogKey.TypedInterface
- if iface.Outputs != nil && len(iface.Outputs.Variables) > 0 {
+ if iface.GetOutputs() != nil && len(iface.GetOutputs().GetVariables()) > 0 {
// copy cached outputs to node outputs
o, ee, err := entry.GetOutputs().Read(ctx)
if err != nil {
@@ -157,15 +157,15 @@ func (n *nodeExecutor) GetOrExtendCatalogReservation(ctx context.Context, nCtx i
}
var status core.CatalogReservation_Status
- if reservation.OwnerId == ownerID {
+ if reservation.GetOwnerId() == ownerID {
status = core.CatalogReservation_RESERVATION_ACQUIRED
} else {
status = core.CatalogReservation_RESERVATION_EXISTS
}
n.metrics.reservationGetSuccessCount.Inc(ctx)
- return catalog.NewReservationEntry(reservation.ExpiresAt.AsTime(),
- reservation.HeartbeatInterval.AsDuration(), reservation.OwnerId, status), nil
+ return catalog.NewReservationEntry(reservation.GetExpiresAt().AsTime(),
+ reservation.GetHeartbeatInterval().AsDuration(), reservation.GetOwnerId(), status), nil
}
// ReleaseCatalogReservation attempts to release an artifact reservation if the task is cacheable
@@ -208,12 +208,12 @@ func (n *nodeExecutor) WriteCatalogCache(ctx context.Context, nCtx interfaces.No
}
iface := catalogKey.TypedInterface
- if iface.Outputs != nil && len(iface.Outputs.Variables) == 0 {
+ if iface.GetOutputs() != nil && len(iface.GetOutputs().GetVariables()) == 0 {
return catalog.NewStatus(core.CatalogCacheStatus_CACHE_DISABLED, nil), nil
}
- logger.Infof(ctx, "Catalog CacheEnabled. recording execution [%s/%s/%s/%s]", catalogKey.Identifier.Project,
- catalogKey.Identifier.Domain, catalogKey.Identifier.Name, catalogKey.Identifier.Version)
+ logger.Infof(ctx, "Catalog CacheEnabled. recording execution [%s/%s/%s/%s]", catalogKey.Identifier.GetProject(),
+ catalogKey.Identifier.GetDomain(), catalogKey.Identifier.GetName(), catalogKey.Identifier.GetVersion())
outputPaths := ioutils.NewReadOnlyOutputFilePaths(ctx, nCtx.DataStore(), nCtx.NodeStatus().GetOutputDir())
outputReader := ioutils.NewRemoteFileOutputReader(ctx, nCtx.DataStore(), outputPaths, 0)
diff --git a/flytepropeller/pkg/controller/nodes/cache_test.go b/flytepropeller/pkg/controller/nodes/cache_test.go
index fa9eecadb2..f6c57b31de 100644
--- a/flytepropeller/pkg/controller/nodes/cache_test.go
+++ b/flytepropeller/pkg/controller/nodes/cache_test.go
@@ -128,11 +128,11 @@ func TestUpdatePhaseCacheInfo(t *testing.T) {
// ensure cache and reservation status' are being set correctly
if test.cacheStatus != nil {
- assert.Equal(t, cacheStatus.GetCacheStatus(), phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata.CacheStatus)
+ assert.Equal(t, cacheStatus.GetCacheStatus(), phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata.GetCacheStatus())
}
if test.reservationStatus != nil {
- assert.Equal(t, reservationStatus, phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata.ReservationStatus)
+ assert.Equal(t, reservationStatus, phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata.GetReservationStatus())
}
})
}
diff --git a/flytepropeller/pkg/controller/nodes/catalog/config.go b/flytepropeller/pkg/controller/nodes/catalog/config.go
index 4dd7bc70ae..cededafcae 100644
--- a/flytepropeller/pkg/controller/nodes/catalog/config.go
+++ b/flytepropeller/pkg/controller/nodes/catalog/config.go
@@ -82,7 +82,7 @@ func NewCatalogClient(ctx context.Context, authOpt ...grpc.DialOption) (catalog.
case DataCatalogType:
return datacatalog.NewDataCatalog(ctx, catalogConfig.Endpoint, catalogConfig.Insecure,
catalogConfig.MaxCacheAge.Duration, catalogConfig.UseAdminAuth, catalogConfig.DefaultServiceConfig,
- uint(catalogConfig.MaxRetries), catalogConfig.BackoffScalar, catalogConfig.GetBackoffJitter(ctx), authOpt...)
+ uint(catalogConfig.MaxRetries), catalogConfig.BackoffScalar, catalogConfig.GetBackoffJitter(ctx), authOpt...) // #nosec G115
case NoOpDiscoveryType, "":
return NOOPCatalog{}, nil
}
diff --git a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog.go b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog.go
index 00a99d6c54..b10f5d0291 100644
--- a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog.go
+++ b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog.go
@@ -54,14 +54,14 @@ func (m *CatalogClient) GetDataset(ctx context.Context, key catalog.Key) (*datac
return nil, err
}
- return datasetResponse.Dataset, nil
+ return datasetResponse.GetDataset(), nil
}
// GetArtifactByTag retrieves an artifact using the provided tag and dataset.
func (m *CatalogClient) GetArtifactByTag(ctx context.Context, tagName string, dataset *datacatalog.Dataset) (*datacatalog.Artifact, error) {
logger.Debugf(ctx, "Get Artifact by tag %v", tagName)
artifactQuery := &datacatalog.GetArtifactRequest{
- Dataset: dataset.Id,
+ Dataset: dataset.GetId(),
QueryHandle: &datacatalog.GetArtifactRequest_TagName{
TagName: tagName,
},
@@ -73,21 +73,21 @@ func (m *CatalogClient) GetArtifactByTag(ctx context.Context, tagName string, da
// check artifact's age if the configuration specifies a max age
if m.maxCacheAge > time.Duration(0) {
- artifact := response.Artifact
- createdAt, err := ptypes.Timestamp(artifact.CreatedAt)
+ artifact := response.GetArtifact()
+ createdAt, err := ptypes.Timestamp(artifact.GetCreatedAt())
if err != nil {
- logger.Errorf(ctx, "DataCatalog Artifact has invalid createdAt %+v, err: %+v", artifact.CreatedAt, err)
+ logger.Errorf(ctx, "DataCatalog Artifact has invalid createdAt %+v, err: %+v", artifact.GetCreatedAt(), err)
return nil, err
}
if time.Since(createdAt) > m.maxCacheAge {
logger.Warningf(ctx, "Expired Cached Artifact %v created on %v, older than max age %v",
- artifact.Id, createdAt.String(), m.maxCacheAge)
+ artifact.GetId(), createdAt.String(), m.maxCacheAge)
return nil, status.Error(codes.NotFound, "Artifact over age limit")
}
}
- return response.Artifact, nil
+ return response.GetArtifact(), nil
}
// Get the cached task execution from Catalog.
@@ -103,7 +103,7 @@ func (m *CatalogClient) Get(ctx context.Context, key catalog.Key) (catalog.Entry
}
inputs := &core.LiteralMap{}
- if key.TypedInterface.Inputs != nil {
+ if key.TypedInterface.GetInputs() != nil {
retInputs, err := key.InputReader.Get(ctx)
if err != nil {
return catalog.Entry{}, errors.Wrap(err, "failed to read inputs when trying to query catalog")
@@ -139,11 +139,11 @@ func (m *CatalogClient) Get(ctx context.Context, key catalog.Key) (catalog.Entry
outputs, err := GenerateTaskOutputsFromArtifact(key.Identifier, key.TypedInterface, artifact)
if err != nil {
- logger.Errorf(ctx, "DataCatalog failed to get outputs from artifact %+v, err: %+v", artifact.Id, err)
+ logger.Errorf(ctx, "DataCatalog failed to get outputs from artifact %+v, err: %+v", artifact.GetId(), err)
return catalog.NewCatalogEntry(ioutils.NewInMemoryOutputReader(outputs, nil, nil), catalog.NewStatus(core.CatalogCacheStatus_CACHE_MISS, md)), err
}
- logger.Infof(ctx, "Retrieved %v outputs from artifact %v, tag: %v", len(outputs.Literals), artifact.Id, tag)
+ logger.Infof(ctx, "Retrieved %v outputs from artifact %v, tag: %v", len(outputs.GetLiterals()), artifact.GetId(), tag)
return catalog.NewCatalogEntry(ioutils.NewInMemoryOutputReader(outputs, nil, nil), catalog.NewStatus(core.CatalogCacheStatus_CACHE_HIT, md)), nil
}
@@ -178,7 +178,7 @@ func (m *CatalogClient) createDataset(ctx context.Context, key catalog.Key, meta
func (m *CatalogClient) prepareInputsAndOutputs(ctx context.Context, key catalog.Key, reader io.OutputReader) (inputs *core.LiteralMap, outputs *core.LiteralMap, err error) {
inputs = &core.LiteralMap{}
outputs = &core.LiteralMap{}
- if key.TypedInterface.Inputs != nil && len(key.TypedInterface.Inputs.Variables) != 0 {
+ if key.TypedInterface.GetInputs() != nil && len(key.TypedInterface.GetInputs().GetVariables()) != 0 {
retInputs, err := key.InputReader.Get(ctx)
if err != nil {
logger.Errorf(ctx, "DataCatalog failed to read inputs err: %s", err)
@@ -188,7 +188,7 @@ func (m *CatalogClient) prepareInputsAndOutputs(ctx context.Context, key catalog
inputs = retInputs
}
- if key.TypedInterface.Outputs != nil && len(key.TypedInterface.Outputs.Variables) != 0 {
+ if key.TypedInterface.GetOutputs() != nil && len(key.TypedInterface.GetOutputs().GetVariables()) != 0 {
retOutputs, retErr, err := reader.Read(ctx)
if err != nil {
logger.Errorf(ctx, "DataCatalog failed to read outputs err: %s", err)
@@ -211,8 +211,8 @@ func (m *CatalogClient) createArtifact(ctx context.Context, key catalog.Key, dat
logger.Debugf(ctx, "Creating artifact for key %+v, dataset %+v and execution %+v", key, datasetID, metadata)
// Create the artifact for the execution that belongs in the task
- artifactDataList := make([]*datacatalog.ArtifactData, 0, len(outputs.Literals))
- for name, value := range outputs.Literals {
+ artifactDataList := make([]*datacatalog.ArtifactData, 0, len(outputs.GetLiterals()))
+ for name, value := range outputs.GetLiterals() {
artifactData := &datacatalog.ArtifactData{
Name: name,
Value: value,
@@ -230,15 +230,15 @@ func (m *CatalogClient) createArtifact(ctx context.Context, key catalog.Key, dat
createArtifactRequest := &datacatalog.CreateArtifactRequest{Artifact: cachedArtifact}
_, err := m.client.CreateArtifact(ctx, createArtifactRequest)
if err != nil {
- logger.Errorf(ctx, "Failed to create Artifact %+v, err: %v", cachedArtifact.Id, err)
+ logger.Errorf(ctx, "Failed to create Artifact %+v, err: %v", cachedArtifact.GetId(), err)
return catalog.Status{}, err
}
- logger.Debugf(ctx, "Created artifact: %v, with %v outputs from execution %+v", cachedArtifact.Id, len(artifactDataList), metadata)
+ logger.Debugf(ctx, "Created artifact: %v, with %v outputs from execution %+v", cachedArtifact.GetId(), len(artifactDataList), metadata)
// Tag the artifact since it is the cached artifact
tagName, err := GenerateArtifactTagName(ctx, inputs, key.CacheIgnoreInputVars)
if err != nil {
- logger.Errorf(ctx, "Failed to generate tag for artifact %+v, err: %+v", cachedArtifact.Id, err)
+ logger.Errorf(ctx, "Failed to generate tag for artifact %+v, err: %+v", cachedArtifact.GetId(), err)
return catalog.Status{}, err
}
logger.Infof(ctx, "Cached exec tag: %v, task: %v", tagName, key.Identifier)
@@ -247,19 +247,19 @@ func (m *CatalogClient) createArtifact(ctx context.Context, key catalog.Key, dat
tag := &datacatalog.Tag{
Name: tagName,
Dataset: datasetID,
- ArtifactId: cachedArtifact.Id,
+ ArtifactId: cachedArtifact.GetId(),
}
_, err = m.client.AddTag(ctx, &datacatalog.AddTagRequest{Tag: tag})
if err != nil {
if status.Code(err) == codes.AlreadyExists {
- logger.Warnf(ctx, "Tag %v already exists for Artifact %v (idempotent)", tagName, cachedArtifact.Id)
+ logger.Warnf(ctx, "Tag %v already exists for Artifact %v (idempotent)", tagName, cachedArtifact.GetId())
} else {
- logger.Errorf(ctx, "Failed to add tag %+v for artifact %+v, err: %+v", tagName, cachedArtifact.Id, err)
+ logger.Errorf(ctx, "Failed to add tag %+v for artifact %+v, err: %+v", tagName, cachedArtifact.GetId(), err)
return catalog.Status{}, err
}
}
- logger.Debugf(ctx, "Successfully created artifact %+v for key %+v, dataset %+v and execution %+v", cachedArtifact.Id, key, datasetID, metadata)
+ logger.Debugf(ctx, "Successfully created artifact %+v for key %+v, dataset %+v and execution %+v", cachedArtifact.GetId(), key, datasetID, metadata)
return catalog.NewStatus(core.CatalogCacheStatus_CACHE_POPULATED, EventCatalogMetadata(datasetID, tag, nil)), nil
}
@@ -267,8 +267,8 @@ func (m *CatalogClient) createArtifact(ctx context.Context, key catalog.Key, dat
func (m *CatalogClient) updateArtifact(ctx context.Context, key catalog.Key, datasetID *datacatalog.DatasetID, inputs *core.LiteralMap, outputs *core.LiteralMap, metadata catalog.Metadata) (catalog.Status, error) {
logger.Debugf(ctx, "Updating artifact for key %+v, dataset %+v and execution %+v", key, datasetID, metadata)
- artifactDataList := make([]*datacatalog.ArtifactData, 0, len(outputs.Literals))
- for name, value := range outputs.Literals {
+ artifactDataList := make([]*datacatalog.ArtifactData, 0, len(outputs.GetLiterals()))
+ for name, value := range outputs.GetLiterals() {
artifactData := &datacatalog.ArtifactData{
Name: name,
Value: value,
@@ -305,7 +305,7 @@ func (m *CatalogClient) updateArtifact(ctx context.Context, key catalog.Key, dat
return catalog.Status{}, fmt.Errorf("failed to get source from metadata. Error: %w", err)
}
- logger.Debugf(ctx, "Successfully updated artifact with ID %v and %d outputs for key %+v, dataset %+v and execution %+v", tag.ArtifactId, len(artifactDataList), key, datasetID, metadata)
+ logger.Debugf(ctx, "Successfully updated artifact with ID %v and %d outputs for key %+v, dataset %+v and execution %+v", tag.GetArtifactId(), len(artifactDataList), key, datasetID, metadata)
return catalog.NewStatus(core.CatalogCacheStatus_CACHE_POPULATED, EventCatalogMetadata(datasetID, tag, source)), nil
}
@@ -382,7 +382,7 @@ func (m *CatalogClient) GetOrExtendReservation(ctx context.Context, key catalog.
}
inputs := &core.LiteralMap{}
- if key.TypedInterface.Inputs != nil {
+ if key.TypedInterface.GetInputs() != nil {
retInputs, err := key.InputReader.Get(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to read inputs when trying to query catalog")
@@ -409,7 +409,7 @@ func (m *CatalogClient) GetOrExtendReservation(ctx context.Context, key catalog.
return nil, err
}
- return response.Reservation, nil
+ return response.GetReservation(), nil
}
// ReleaseReservation attempts to release a reservation for a cacheable task. If the reservation
@@ -422,7 +422,7 @@ func (m *CatalogClient) ReleaseReservation(ctx context.Context, key catalog.Key,
}
inputs := &core.LiteralMap{}
- if key.TypedInterface.Inputs != nil {
+ if key.TypedInterface.GetInputs() != nil {
retInputs, err := key.InputReader.Get(ctx)
if err != nil {
return errors.Wrap(err, "failed to read inputs when trying to query catalog")
diff --git a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog_test.go b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog_test.go
index ce8f6f4069..2a4c1a07eb 100644
--- a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog_test.go
+++ b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/datacatalog_test.go
@@ -108,7 +108,7 @@ func TestCatalog_Get(t *testing.T) {
mockClient.On("GetDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool {
- assert.EqualValues(t, datasetID.String(), o.Dataset.String())
+ assert.EqualValues(t, datasetID.String(), o.GetDataset().String())
return true
}),
).Return(nil, status.Error(codes.NotFound, "test not found"))
@@ -136,7 +136,7 @@ func TestCatalog_Get(t *testing.T) {
mockClient.On("GetDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool {
- assert.EqualValues(t, datasetID.String(), o.Dataset.String())
+ assert.EqualValues(t, datasetID.String(), o.GetDataset().String())
return true
}),
).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil, "")
@@ -167,9 +167,9 @@ func TestCatalog_Get(t *testing.T) {
taskID := &core.TaskExecutionIdentifier{
TaskId: &core.Identifier{
ResourceType: core.ResourceType_TASK,
- Name: sampleKey.Identifier.Name,
- Project: sampleKey.Identifier.Project,
- Domain: sampleKey.Identifier.Domain,
+ Name: sampleKey.Identifier.GetName(),
+ Project: sampleKey.Identifier.GetProject(),
+ Domain: sampleKey.Identifier.GetDomain(),
Version: "ver",
},
NodeExecutionId: &core.NodeExecutionIdentifier{
@@ -190,14 +190,14 @@ func TestCatalog_Get(t *testing.T) {
mockClient.On("GetDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool {
- assert.EqualValues(t, datasetID, o.Dataset)
+ assert.EqualValues(t, datasetID, o.GetDataset())
return true
}),
).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil)
sampleArtifact := &datacatalog.Artifact{
Id: "test-artifact",
- Dataset: sampleDataSet.Id,
+ Dataset: sampleDataSet.GetId(),
Data: []*datacatalog.ArtifactData{sampleArtifactData},
Metadata: GetArtifactMetadataForSource(taskID),
Tags: []*datacatalog.Tag{
@@ -208,16 +208,16 @@ func TestCatalog_Get(t *testing.T) {
},
}
- assert.Equal(t, taskID.NodeExecutionId.ExecutionId.Name, sampleArtifact.GetMetadata().KeyMap[execNameKey])
- assert.Equal(t, taskID.NodeExecutionId.NodeId, sampleArtifact.GetMetadata().KeyMap[execNodeIDKey])
- assert.Equal(t, taskID.NodeExecutionId.ExecutionId.Project, sampleArtifact.GetMetadata().KeyMap[execProjectKey])
- assert.Equal(t, taskID.NodeExecutionId.ExecutionId.Domain, sampleArtifact.GetMetadata().KeyMap[execDomainKey])
- assert.Equal(t, strconv.Itoa(int(taskID.RetryAttempt)), sampleArtifact.GetMetadata().KeyMap[execTaskAttemptKey])
+ assert.Equal(t, taskID.GetNodeExecutionId().GetExecutionId().GetName(), sampleArtifact.GetMetadata().GetKeyMap()[execNameKey])
+ assert.Equal(t, taskID.GetNodeExecutionId().GetNodeId(), sampleArtifact.GetMetadata().GetKeyMap()[execNodeIDKey])
+ assert.Equal(t, taskID.GetNodeExecutionId().GetExecutionId().GetProject(), sampleArtifact.GetMetadata().GetKeyMap()[execProjectKey])
+ assert.Equal(t, taskID.GetNodeExecutionId().GetExecutionId().GetDomain(), sampleArtifact.GetMetadata().GetKeyMap()[execDomainKey])
+ assert.Equal(t, strconv.Itoa(int(taskID.GetRetryAttempt())), sampleArtifact.GetMetadata().GetKeyMap()[execTaskAttemptKey])
mockClient.On("GetArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.GetArtifactRequest) bool {
- assert.EqualValues(t, datasetID, o.Dataset)
+ assert.EqualValues(t, datasetID, o.GetDataset())
assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTagName())
return true
}),
@@ -228,18 +228,18 @@ func TestCatalog_Get(t *testing.T) {
resp, err := catalogClient.Get(ctx, newKey)
assert.NoError(t, err)
assert.Equal(t, core.CatalogCacheStatus_CACHE_HIT.String(), resp.GetStatus().GetCacheStatus().String())
- assert.NotNil(t, resp.GetStatus().GetMetadata().DatasetId)
- assert.Equal(t, core.ResourceType_DATASET, resp.GetStatus().GetMetadata().DatasetId.ResourceType)
- assert.Equal(t, datasetID.Name, resp.GetStatus().GetMetadata().DatasetId.Name)
- assert.Equal(t, datasetID.Project, resp.GetStatus().GetMetadata().DatasetId.Project)
- assert.Equal(t, datasetID.Domain, resp.GetStatus().GetMetadata().DatasetId.Domain)
- assert.Equal(t, datasetID.Version, resp.GetStatus().GetMetadata().DatasetId.Version)
- assert.NotNil(t, resp.GetStatus().GetMetadata().ArtifactTag)
- assert.NotNil(t, resp.GetStatus().GetMetadata().SourceExecution)
+ assert.NotNil(t, resp.GetStatus().GetMetadata().GetDatasetId())
+ assert.Equal(t, core.ResourceType_DATASET, resp.GetStatus().GetMetadata().GetDatasetId().GetResourceType())
+ assert.Equal(t, datasetID.GetName(), resp.GetStatus().GetMetadata().GetDatasetId().GetName())
+ assert.Equal(t, datasetID.GetProject(), resp.GetStatus().GetMetadata().GetDatasetId().GetProject())
+ assert.Equal(t, datasetID.GetDomain(), resp.GetStatus().GetMetadata().GetDatasetId().GetDomain())
+ assert.Equal(t, datasetID.GetVersion(), resp.GetStatus().GetMetadata().GetDatasetId().GetVersion())
+ assert.NotNil(t, resp.GetStatus().GetMetadata().GetArtifactTag())
+ assert.NotNil(t, resp.GetStatus().GetMetadata().GetSourceExecution())
sourceTID := resp.GetStatus().GetMetadata().GetSourceTaskExecution()
- assert.Equal(t, taskID.TaskId.String(), sourceTID.TaskId.String())
- assert.Equal(t, taskID.RetryAttempt, sourceTID.RetryAttempt)
- assert.Equal(t, taskID.NodeExecutionId.String(), sourceTID.NodeExecutionId.String())
+ assert.Equal(t, taskID.GetTaskId().String(), sourceTID.GetTaskId().String())
+ assert.Equal(t, taskID.GetRetryAttempt(), sourceTID.GetRetryAttempt())
+ assert.Equal(t, taskID.GetNodeExecutionId().String(), sourceTID.GetNodeExecutionId().String())
})
t.Run("Found expired artifact", func(t *testing.T) {
@@ -259,7 +259,7 @@ func TestCatalog_Get(t *testing.T) {
mockClient.On("GetDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool {
- assert.EqualValues(t, datasetID, o.Dataset)
+ assert.EqualValues(t, datasetID, o.GetDataset())
return true
}),
).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil)
@@ -268,14 +268,14 @@ func TestCatalog_Get(t *testing.T) {
sampleArtifact := &datacatalog.Artifact{
Id: "test-artifact",
- Dataset: sampleDataSet.Id,
+ Dataset: sampleDataSet.GetId(),
Data: []*datacatalog.ArtifactData{sampleArtifactData},
CreatedAt: createdAt,
}
mockClient.On("GetArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.GetArtifactRequest) bool {
- assert.EqualValues(t, datasetID, o.Dataset)
+ assert.EqualValues(t, datasetID, o.GetDataset())
assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTagName())
return true
}),
@@ -309,7 +309,7 @@ func TestCatalog_Get(t *testing.T) {
mockClient.On("GetDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool {
- assert.EqualValues(t, datasetID, o.Dataset)
+ assert.EqualValues(t, datasetID, o.GetDataset())
return true
}),
).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil)
@@ -318,14 +318,14 @@ func TestCatalog_Get(t *testing.T) {
sampleArtifact := &datacatalog.Artifact{
Id: "test-artifact",
- Dataset: sampleDataSet.Id,
+ Dataset: sampleDataSet.GetId(),
Data: []*datacatalog.ArtifactData{sampleArtifactData},
CreatedAt: createdAt,
}
mockClient.On("GetArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.GetArtifactRequest) bool {
- assert.EqualValues(t, datasetID, o.Dataset)
+ assert.EqualValues(t, datasetID, o.GetDataset())
assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTagName())
return true
}),
@@ -356,20 +356,20 @@ func TestCatalog_Get(t *testing.T) {
mockClient.On("GetDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.GetDatasetRequest) bool {
- assert.EqualValues(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.Dataset.Version)
+ assert.EqualValues(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.GetDataset().GetVersion())
return true
}),
).Return(&datacatalog.GetDatasetResponse{Dataset: sampleDataSet}, nil)
sampleArtifact := &datacatalog.Artifact{
Id: "test-artifact",
- Dataset: sampleDataSet.Id,
+ Dataset: sampleDataSet.GetId(),
Data: []*datacatalog.ArtifactData{},
}
mockClient.On("GetArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.GetArtifactRequest) bool {
- assert.EqualValues(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.Dataset.Version)
+ assert.EqualValues(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.GetDataset().GetVersion())
assert.Equal(t, "flyte_cached-GKw-c0PwFokMUQ6T-TUmEWnZ4_VlQ2Qpgw-vCTT0-OQ", o.GetTagName())
return true
}),
@@ -385,7 +385,7 @@ func TestCatalog_Get(t *testing.T) {
v, e, err := resp.GetOutputs().Read(ctx)
assert.NoError(t, err)
assert.Nil(t, e)
- assert.Len(t, v.Literals, 0)
+ assert.Len(t, v.GetLiterals(), 0)
})
}
@@ -404,7 +404,7 @@ func TestCatalog_Put(t *testing.T) {
mockClient.On("CreateDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.CreateDatasetRequest) bool {
- assert.True(t, proto.Equal(o.Dataset.Id, datasetID))
+ assert.True(t, proto.Equal(o.GetDataset().GetId(), datasetID))
return true
}),
).Return(&datacatalog.CreateDatasetResponse{}, nil)
@@ -412,11 +412,11 @@ func TestCatalog_Put(t *testing.T) {
mockClient.On("CreateArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.CreateArtifactRequest) bool {
- _, parseErr := uuid.Parse(o.Artifact.Id)
+ _, parseErr := uuid.Parse(o.GetArtifact().GetId())
assert.NoError(t, parseErr)
- assert.EqualValues(t, 1, len(o.Artifact.Data))
- assert.EqualValues(t, "out1", o.Artifact.Data[0].Name)
- assert.True(t, proto.Equal(newStringLiteral("output1-stringval"), o.Artifact.Data[0].Value))
+ assert.EqualValues(t, 1, len(o.GetArtifact().GetData()))
+ assert.EqualValues(t, "out1", o.GetArtifact().GetData()[0].GetName())
+ assert.True(t, proto.Equal(newStringLiteral("output1-stringval"), o.GetArtifact().GetData()[0].GetValue()))
return true
}),
).Return(&datacatalog.CreateArtifactResponse{}, nil)
@@ -424,7 +424,7 @@ func TestCatalog_Put(t *testing.T) {
mockClient.On("AddTag",
ctx,
mock.MatchedBy(func(o *datacatalog.AddTagRequest) bool {
- assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.Tag.Name)
+ assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTag().GetName())
return true
}),
).Return(&datacatalog.AddTagResponse{}, nil)
@@ -440,7 +440,7 @@ func TestCatalog_Put(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, core.CatalogCacheStatus_CACHE_POPULATED, s.GetCacheStatus())
assert.NotNil(t, s.GetMetadata())
- assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", s.GetMetadata().ArtifactTag.Name)
+ assert.Equal(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", s.GetMetadata().GetArtifactTag().GetName())
})
t.Run("Create dataset fails", func(t *testing.T) {
@@ -519,7 +519,7 @@ func TestCatalog_Put(t *testing.T) {
mockClient.On("CreateDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.CreateDatasetRequest) bool {
- assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.Dataset.Id.Version)
+ assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", o.GetDataset().GetId().GetVersion())
return true
}),
).Return(&datacatalog.CreateDatasetResponse{}, nil)
@@ -527,7 +527,7 @@ func TestCatalog_Put(t *testing.T) {
mockClient.On("CreateArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.CreateArtifactRequest) bool {
- assert.EqualValues(t, 0, len(o.Artifact.Data))
+ assert.EqualValues(t, 0, len(o.GetArtifact().GetData()))
return true
}),
).Return(&datacatalog.CreateArtifactResponse{}, nil)
@@ -535,7 +535,7 @@ func TestCatalog_Put(t *testing.T) {
mockClient.On("AddTag",
ctx,
mock.MatchedBy(func(o *datacatalog.AddTagRequest) bool {
- assert.EqualValues(t, "flyte_cached-GKw-c0PwFokMUQ6T-TUmEWnZ4_VlQ2Qpgw-vCTT0-OQ", o.Tag.Name)
+ assert.EqualValues(t, "flyte_cached-GKw-c0PwFokMUQ6T-TUmEWnZ4_VlQ2Qpgw-vCTT0-OQ", o.GetTag().GetName())
return true
}),
).Return(&datacatalog.AddTagResponse{}, nil)
@@ -567,11 +567,11 @@ func TestCatalog_Put(t *testing.T) {
mockClient.On("CreateArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.CreateArtifactRequest) bool {
- _, parseErr := uuid.Parse(o.Artifact.Id)
+ _, parseErr := uuid.Parse(o.GetArtifact().GetId())
assert.NoError(t, parseErr)
- assert.EqualValues(t, 1, len(o.Artifact.Data))
- assert.EqualValues(t, "out1", o.Artifact.Data[0].Name)
- assert.True(t, proto.Equal(newStringLiteral("output1-stringval"), o.Artifact.Data[0].Value))
+ assert.EqualValues(t, 1, len(o.GetArtifact().GetData()))
+ assert.EqualValues(t, "out1", o.GetArtifact().GetData()[0].GetName())
+ assert.True(t, proto.Equal(newStringLiteral("output1-stringval"), o.GetArtifact().GetData()[0].GetValue()))
createArtifactCalled = true
return true
}),
@@ -581,7 +581,7 @@ func TestCatalog_Put(t *testing.T) {
mockClient.On("AddTag",
ctx,
mock.MatchedBy(func(o *datacatalog.AddTagRequest) bool {
- assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.Tag.Name)
+ assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTag().GetName())
addTagCalled = true
return true
}),
@@ -619,7 +619,7 @@ func TestCatalog_Update(t *testing.T) {
mockClient.On("CreateDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.CreateDatasetRequest) bool {
- assert.True(t, proto.Equal(o.Dataset.Id, datasetID))
+ assert.True(t, proto.Equal(o.GetDataset().GetId(), datasetID))
return true
}),
).Return(&datacatalog.CreateDatasetResponse{}, nil)
@@ -627,8 +627,8 @@ func TestCatalog_Update(t *testing.T) {
mockClient.On("UpdateArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.UpdateArtifactRequest) bool {
- assert.True(t, proto.Equal(o.Dataset, datasetID))
- assert.IsType(t, &datacatalog.UpdateArtifactRequest_TagName{}, o.QueryHandle)
+ assert.True(t, proto.Equal(o.GetDataset(), datasetID))
+ assert.IsType(t, &datacatalog.UpdateArtifactRequest_TagName{}, o.GetQueryHandle())
assert.Equal(t, tagName, o.GetTagName())
return true
}),
@@ -637,9 +637,9 @@ func TestCatalog_Update(t *testing.T) {
taskID := &core.TaskExecutionIdentifier{
TaskId: &core.Identifier{
ResourceType: core.ResourceType_TASK,
- Name: sampleKey.Identifier.Name,
- Project: sampleKey.Identifier.Project,
- Domain: sampleKey.Identifier.Domain,
+ Name: sampleKey.Identifier.GetName(),
+ Project: sampleKey.Identifier.GetProject(),
+ Domain: sampleKey.Identifier.GetDomain(),
Version: "version",
},
NodeExecutionId: &core.NodeExecutionIdentifier{
@@ -658,24 +658,24 @@ func TestCatalog_Update(t *testing.T) {
or := ioutils.NewInMemoryOutputReader(sampleParameters, nil, nil)
s, err := discovery.Update(ctx, newKey, or, catalog.Metadata{
WorkflowExecutionIdentifier: &core.WorkflowExecutionIdentifier{
- Name: taskID.NodeExecutionId.ExecutionId.Name,
- Domain: taskID.NodeExecutionId.ExecutionId.Domain,
- Project: taskID.NodeExecutionId.ExecutionId.Project,
+ Name: taskID.GetNodeExecutionId().GetExecutionId().GetName(),
+ Domain: taskID.GetNodeExecutionId().GetExecutionId().GetDomain(),
+ Project: taskID.GetNodeExecutionId().GetExecutionId().GetProject(),
},
TaskExecutionIdentifier: &core.TaskExecutionIdentifier{
TaskId: &sampleKey.Identifier,
- NodeExecutionId: taskID.NodeExecutionId,
+ NodeExecutionId: taskID.GetNodeExecutionId(),
RetryAttempt: 0,
},
})
assert.NoError(t, err)
assert.Equal(t, core.CatalogCacheStatus_CACHE_POPULATED, s.GetCacheStatus())
assert.NotNil(t, s.GetMetadata())
- assert.Equal(t, tagName, s.GetMetadata().ArtifactTag.Name)
+ assert.Equal(t, tagName, s.GetMetadata().GetArtifactTag().GetName())
sourceTID := s.GetMetadata().GetSourceTaskExecution()
- assert.Equal(t, taskID.TaskId.String(), sourceTID.TaskId.String())
- assert.Equal(t, taskID.RetryAttempt, sourceTID.RetryAttempt)
- assert.Equal(t, taskID.NodeExecutionId.String(), sourceTID.NodeExecutionId.String())
+ assert.Equal(t, taskID.GetTaskId().String(), sourceTID.GetTaskId().String())
+ assert.Equal(t, taskID.GetRetryAttempt(), sourceTID.GetRetryAttempt())
+ assert.Equal(t, taskID.GetNodeExecutionId().String(), sourceTID.GetNodeExecutionId().String())
})
t.Run("Overwrite non-existing execution", func(t *testing.T) {
@@ -706,9 +706,9 @@ func TestCatalog_Update(t *testing.T) {
taskID := &core.TaskExecutionIdentifier{
TaskId: &core.Identifier{
ResourceType: core.ResourceType_TASK,
- Name: sampleKey.Identifier.Name,
- Project: sampleKey.Identifier.Project,
- Domain: sampleKey.Identifier.Domain,
+ Name: sampleKey.Identifier.GetName(),
+ Project: sampleKey.Identifier.GetProject(),
+ Domain: sampleKey.Identifier.GetDomain(),
Version: "version",
},
NodeExecutionId: &core.NodeExecutionIdentifier{
@@ -727,13 +727,13 @@ func TestCatalog_Update(t *testing.T) {
or := ioutils.NewInMemoryOutputReader(sampleParameters, nil, nil)
s, err := discovery.Update(ctx, newKey, or, catalog.Metadata{
WorkflowExecutionIdentifier: &core.WorkflowExecutionIdentifier{
- Name: taskID.NodeExecutionId.ExecutionId.Name,
- Domain: taskID.NodeExecutionId.ExecutionId.Domain,
- Project: taskID.NodeExecutionId.ExecutionId.Project,
+ Name: taskID.GetNodeExecutionId().GetExecutionId().GetName(),
+ Domain: taskID.GetNodeExecutionId().GetExecutionId().GetDomain(),
+ Project: taskID.GetNodeExecutionId().GetExecutionId().GetProject(),
},
TaskExecutionIdentifier: &core.TaskExecutionIdentifier{
TaskId: &sampleKey.Identifier,
- NodeExecutionId: taskID.NodeExecutionId,
+ NodeExecutionId: taskID.GetNodeExecutionId(),
RetryAttempt: 0,
},
})
@@ -755,7 +755,7 @@ func TestCatalog_Update(t *testing.T) {
mockClient.On("CreateDataset",
ctx,
mock.MatchedBy(func(o *datacatalog.CreateDatasetRequest) bool {
- assert.True(t, proto.Equal(o.Dataset.Id, datasetID))
+ assert.True(t, proto.Equal(o.GetDataset().GetId(), datasetID))
createDatasetCalled = true
return true
}),
@@ -770,9 +770,9 @@ func TestCatalog_Update(t *testing.T) {
mockClient.On("CreateArtifact",
ctx,
mock.MatchedBy(func(o *datacatalog.CreateArtifactRequest) bool {
- _, parseErr := uuid.Parse(o.Artifact.Id)
+ _, parseErr := uuid.Parse(o.GetArtifact().GetId())
assert.NoError(t, parseErr)
- assert.True(t, proto.Equal(o.Artifact.Dataset, datasetID))
+ assert.True(t, proto.Equal(o.GetArtifact().GetDataset(), datasetID))
createArtifactCalled = true
return true
}),
@@ -782,7 +782,7 @@ func TestCatalog_Update(t *testing.T) {
mockClient.On("AddTag",
ctx,
mock.MatchedBy(func(o *datacatalog.AddTagRequest) bool {
- assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.Tag.Name)
+ assert.EqualValues(t, "flyte_cached-BE6CZsMk6N3ExR_4X9EuwBgj2Jh2UwasXK3a_pM9xlY", o.GetTag().GetName())
addTagCalled = true
return true
}),
@@ -791,9 +791,9 @@ func TestCatalog_Update(t *testing.T) {
taskID := &core.TaskExecutionIdentifier{
TaskId: &core.Identifier{
ResourceType: core.ResourceType_TASK,
- Name: sampleKey.Identifier.Name,
- Project: sampleKey.Identifier.Project,
- Domain: sampleKey.Identifier.Domain,
+ Name: sampleKey.Identifier.GetName(),
+ Project: sampleKey.Identifier.GetProject(),
+ Domain: sampleKey.Identifier.GetDomain(),
Version: "version",
},
NodeExecutionId: &core.NodeExecutionIdentifier{
@@ -812,20 +812,20 @@ func TestCatalog_Update(t *testing.T) {
or := ioutils.NewInMemoryOutputReader(sampleParameters, nil, nil)
s, err := discovery.Update(ctx, newKey, or, catalog.Metadata{
WorkflowExecutionIdentifier: &core.WorkflowExecutionIdentifier{
- Name: taskID.NodeExecutionId.ExecutionId.Name,
- Domain: taskID.NodeExecutionId.ExecutionId.Domain,
- Project: taskID.NodeExecutionId.ExecutionId.Project,
+ Name: taskID.GetNodeExecutionId().GetExecutionId().GetName(),
+ Domain: taskID.GetNodeExecutionId().GetExecutionId().GetDomain(),
+ Project: taskID.GetNodeExecutionId().GetExecutionId().GetProject(),
},
TaskExecutionIdentifier: &core.TaskExecutionIdentifier{
TaskId: &sampleKey.Identifier,
- NodeExecutionId: taskID.NodeExecutionId,
+ NodeExecutionId: taskID.GetNodeExecutionId(),
RetryAttempt: 0,
},
})
assert.NoError(t, err)
assert.Equal(t, core.CatalogCacheStatus_CACHE_POPULATED, s.GetCacheStatus())
assert.NotNil(t, s.GetMetadata())
- assert.Equal(t, tagName, s.GetMetadata().ArtifactTag.Name)
+ assert.Equal(t, tagName, s.GetMetadata().GetArtifactTag().GetName())
assert.Nil(t, s.GetMetadata().GetSourceTaskExecution())
assert.True(t, createDatasetCalled)
assert.True(t, updateArtifactCalled)
@@ -932,8 +932,8 @@ func TestCatalog_GetOrExtendReservation(t *testing.T) {
mockClient.On("GetOrExtendReservation",
ctx,
mock.MatchedBy(func(o *datacatalog.GetOrExtendReservationRequest) bool {
- assert.EqualValues(t, datasetID.String(), o.ReservationId.DatasetId.String())
- assert.EqualValues(t, tagName, o.ReservationId.TagName)
+ assert.EqualValues(t, datasetID.String(), o.GetReservationId().GetDatasetId().String())
+ assert.EqualValues(t, tagName, o.GetReservationId().GetTagName())
return true
}),
).Return(&datacatalog.GetOrExtendReservationResponse{Reservation: &currentReservation}, nil, "")
@@ -943,7 +943,7 @@ func TestCatalog_GetOrExtendReservation(t *testing.T) {
reservation, err := catalogClient.GetOrExtendReservation(ctx, newKey, currentOwner, heartbeatInterval)
assert.NoError(t, err)
- assert.Equal(t, reservation.OwnerId, currentOwner)
+ assert.Equal(t, reservation.GetOwnerId(), currentOwner)
})
t.Run("ExistingReservation", func(t *testing.T) {
@@ -958,8 +958,8 @@ func TestCatalog_GetOrExtendReservation(t *testing.T) {
mockClient.On("GetOrExtendReservation",
ctx,
mock.MatchedBy(func(o *datacatalog.GetOrExtendReservationRequest) bool {
- assert.EqualValues(t, datasetID.String(), o.ReservationId.DatasetId.String())
- assert.EqualValues(t, tagName, o.ReservationId.TagName)
+ assert.EqualValues(t, datasetID.String(), o.GetReservationId().GetDatasetId().String())
+ assert.EqualValues(t, tagName, o.GetReservationId().GetTagName())
return true
}),
).Return(&datacatalog.GetOrExtendReservationResponse{Reservation: &prevReservation}, nil, "")
@@ -969,7 +969,7 @@ func TestCatalog_GetOrExtendReservation(t *testing.T) {
reservation, err := catalogClient.GetOrExtendReservation(ctx, newKey, currentOwner, heartbeatInterval)
assert.NoError(t, err)
- assert.Equal(t, reservation.OwnerId, prevOwner)
+ assert.Equal(t, reservation.GetOwnerId(), prevOwner)
})
}
@@ -988,8 +988,8 @@ func TestCatalog_ReleaseReservation(t *testing.T) {
mockClient.On("ReleaseReservation",
ctx,
mock.MatchedBy(func(o *datacatalog.ReleaseReservationRequest) bool {
- assert.EqualValues(t, datasetID.String(), o.ReservationId.DatasetId.String())
- assert.EqualValues(t, tagName, o.ReservationId.TagName)
+ assert.EqualValues(t, datasetID.String(), o.GetReservationId().GetDatasetId().String())
+ assert.EqualValues(t, tagName, o.GetReservationId().GetTagName())
return true
}),
).Return(&datacatalog.ReleaseReservationResponse{}, nil, "")
@@ -1013,8 +1013,8 @@ func TestCatalog_ReleaseReservation(t *testing.T) {
mockClient.On("ReleaseReservation",
ctx,
mock.MatchedBy(func(o *datacatalog.ReleaseReservationRequest) bool {
- assert.EqualValues(t, datasetID.String(), o.ReservationId.DatasetId.String())
- assert.EqualValues(t, tagName, o.ReservationId.TagName)
+ assert.EqualValues(t, datasetID.String(), o.GetReservationId().GetDatasetId().String())
+ assert.EqualValues(t, tagName, o.GetReservationId().GetTagName())
return true
}),
).Return(nil, status.Error(codes.NotFound, "reservation not found"))
diff --git a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer.go b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer.go
index ba94bdadec..403d1a6885 100644
--- a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer.go
+++ b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer.go
@@ -26,43 +26,43 @@ var emptyLiteralMap = core.LiteralMap{Literals: map[string]*core.Literal{}}
var emptyVariableMap = core.VariableMap{Variables: map[string]*core.Variable{}}
func getDatasetNameFromTask(taskID core.Identifier) string {
- return fmt.Sprintf("%s-%s", taskNamespace, taskID.Name)
+ return fmt.Sprintf("%s-%s", taskNamespace, taskID.GetName())
}
// Transform the artifact Data into task execution outputs as a literal map
func GenerateTaskOutputsFromArtifact(id core.Identifier, taskInterface core.TypedInterface, artifact *datacatalog.Artifact) (*core.LiteralMap, error) {
// if there are no outputs in the task, return empty map
- if taskInterface.Outputs == nil || len(taskInterface.Outputs.Variables) == 0 {
+ if taskInterface.GetOutputs() == nil || len(taskInterface.GetOutputs().GetVariables()) == 0 {
return &emptyLiteralMap, nil
}
- outputVariables := taskInterface.Outputs.Variables
- artifactDataList := artifact.Data
+ outputVariables := taskInterface.GetOutputs().GetVariables()
+ artifactDataList := artifact.GetData()
// verify the task outputs matches what is stored in ArtifactData
if len(outputVariables) != len(artifactDataList) {
- return nil, fmt.Errorf("the task %s with %d outputs, should have %d artifactData for artifact %s", id.String(), len(outputVariables), len(artifactDataList), artifact.Id)
+ return nil, fmt.Errorf("the task %s with %d outputs, should have %d artifactData for artifact %s", id.String(), len(outputVariables), len(artifactDataList), artifact.GetId())
}
outputs := make(map[string]*core.Literal, len(artifactDataList))
for _, artifactData := range artifactDataList {
// verify that the name and type of artifactData matches what is expected from the interface
- if _, ok := outputVariables[artifactData.Name]; !ok {
- return nil, fmt.Errorf("unexpected artifactData with name [%v] does not match any task output variables %v", artifactData.Name, reflect.ValueOf(outputVariables).MapKeys())
+ if _, ok := outputVariables[artifactData.GetName()]; !ok {
+ return nil, fmt.Errorf("unexpected artifactData with name [%v] does not match any task output variables %v", artifactData.GetName(), reflect.ValueOf(outputVariables).MapKeys())
}
- expectedVarType := outputVariables[artifactData.Name].GetType()
- inputType := validators.LiteralTypeForLiteral(artifactData.Value)
+ expectedVarType := outputVariables[artifactData.GetName()].GetType()
+ inputType := validators.LiteralTypeForLiteral(artifactData.GetValue())
err := validators.ValidateLiteralType(inputType)
if err != nil {
- return nil, fmt.Errorf("failed to validate literal type for %s with err: %s", artifactData.Name, err)
+ return nil, fmt.Errorf("failed to validate literal type for %s with err: %s", artifactData.GetName(), err)
}
if !validators.AreTypesCastable(inputType, expectedVarType) {
- return nil, fmt.Errorf("unexpected artifactData: [%v] type: [%v] does not match any task output type: [%v]", artifactData.Name, inputType, expectedVarType)
+ return nil, fmt.Errorf("unexpected artifactData: [%v] type: [%v] does not match any task output type: [%v]", artifactData.GetName(), inputType, expectedVarType)
}
- outputs[artifactData.Name] = artifactData.Value
+ outputs[artifactData.GetName()] = artifactData.GetValue()
}
return &core.LiteralMap{Literals: outputs}, nil
@@ -86,12 +86,12 @@ func generateTaskSignatureHash(ctx context.Context, taskInterface core.TypedInte
taskInputs := &emptyVariableMap
taskOutputs := &emptyVariableMap
- if taskInterface.Inputs != nil && len(taskInterface.Inputs.Variables) != 0 {
- taskInputs = taskInterface.Inputs
+ if taskInterface.GetInputs() != nil && len(taskInterface.GetInputs().GetVariables()) != 0 {
+ taskInputs = taskInterface.GetInputs()
}
- if taskInterface.Outputs != nil && len(taskInterface.Outputs.Variables) != 0 {
- taskOutputs = taskInterface.Outputs
+ if taskInterface.GetOutputs() != nil && len(taskInterface.GetOutputs().GetVariables()) != 0 {
+ taskOutputs = taskInterface.GetOutputs()
}
inputHash, err := pbhash.ComputeHash(ctx, taskInputs)
@@ -138,8 +138,8 @@ func GenerateDatasetIDForTask(ctx context.Context, k catalog.Key) (*datacatalog.
}
datasetID := &datacatalog.DatasetID{
- Project: k.Identifier.Project,
- Domain: k.Identifier.Domain,
+ Project: k.Identifier.GetProject(),
+ Domain: k.Identifier.GetDomain(),
Name: getDatasetNameFromTask(k.Identifier),
Version: datasetVersion,
}
@@ -150,7 +150,7 @@ func DatasetIDToIdentifier(id *datacatalog.DatasetID) *core.Identifier {
if id == nil {
return nil
}
- return &core.Identifier{ResourceType: core.ResourceType_DATASET, Name: id.Name, Project: id.Project, Domain: id.Domain, Version: id.Version}
+ return &core.Identifier{ResourceType: core.ResourceType_DATASET, Name: id.GetName(), Project: id.GetProject(), Domain: id.GetDomain(), Version: id.GetVersion()}
}
// With Node-Node relationship this is bound to change. So lets keep it extensible
@@ -175,7 +175,7 @@ func GetDatasetMetadataForSource(taskExecutionID *core.TaskExecutionIdentifier)
}
return &datacatalog.Metadata{
KeyMap: map[string]string{
- taskVersionKey: taskExecutionID.TaskId.Version,
+ taskVersionKey: taskExecutionID.GetTaskId().GetVersion(),
},
}
}
@@ -186,10 +186,10 @@ func GetArtifactMetadataForSource(taskExecutionID *core.TaskExecutionIdentifier)
}
return &datacatalog.Metadata{
KeyMap: map[string]string{
- execProjectKey: taskExecutionID.NodeExecutionId.GetExecutionId().GetProject(),
- execDomainKey: taskExecutionID.NodeExecutionId.GetExecutionId().GetDomain(),
- execNameKey: taskExecutionID.NodeExecutionId.GetExecutionId().GetName(),
- execNodeIDKey: taskExecutionID.NodeExecutionId.GetNodeId(),
+ execProjectKey: taskExecutionID.GetNodeExecutionId().GetExecutionId().GetProject(),
+ execDomainKey: taskExecutionID.GetNodeExecutionId().GetExecutionId().GetDomain(),
+ execNameKey: taskExecutionID.GetNodeExecutionId().GetExecutionId().GetName(),
+ execNodeIDKey: taskExecutionID.GetNodeExecutionId().GetNodeId(),
execTaskAttemptKey: strconv.Itoa(int(taskExecutionID.GetRetryAttempt())),
},
}
@@ -207,7 +207,7 @@ func GetSourceFromMetadata(datasetMd, artifactMd *datacatalog.Metadata, currentI
}
// Jul-06-2020 DataCatalog stores only wfExecutionKey & taskVersionKey So we will default the project / domain to the current dataset's project domain
- val := GetOrDefault(artifactMd.KeyMap, execTaskAttemptKey, "0")
+ val := GetOrDefault(artifactMd.GetKeyMap(), execTaskAttemptKey, "0")
attempt, err := strconv.ParseUint(val, 10, 32)
if err != nil {
return nil, fmt.Errorf("failed to parse [%v] to integer. Error: %w", val, err)
@@ -215,19 +215,19 @@ func GetSourceFromMetadata(datasetMd, artifactMd *datacatalog.Metadata, currentI
return &core.TaskExecutionIdentifier{
TaskId: &core.Identifier{
- ResourceType: currentID.ResourceType,
- Project: currentID.Project,
- Domain: currentID.Domain,
- Name: currentID.Name,
- Version: GetOrDefault(datasetMd.KeyMap, taskVersionKey, "unknown"),
+ ResourceType: currentID.GetResourceType(),
+ Project: currentID.GetProject(),
+ Domain: currentID.GetDomain(),
+ Name: currentID.GetName(),
+ Version: GetOrDefault(datasetMd.GetKeyMap(), taskVersionKey, "unknown"),
},
RetryAttempt: uint32(attempt),
NodeExecutionId: &core.NodeExecutionIdentifier{
- NodeId: GetOrDefault(artifactMd.KeyMap, execNodeIDKey, "unknown"),
+ NodeId: GetOrDefault(artifactMd.GetKeyMap(), execNodeIDKey, "unknown"),
ExecutionId: &core.WorkflowExecutionIdentifier{
- Project: GetOrDefault(artifactMd.KeyMap, execProjectKey, currentID.GetProject()),
- Domain: GetOrDefault(artifactMd.KeyMap, execDomainKey, currentID.GetDomain()),
- Name: GetOrDefault(artifactMd.KeyMap, execNameKey, "unknown"),
+ Project: GetOrDefault(artifactMd.GetKeyMap(), execProjectKey, currentID.GetProject()),
+ Domain: GetOrDefault(artifactMd.GetKeyMap(), execDomainKey, currentID.GetDomain()),
+ Name: GetOrDefault(artifactMd.GetKeyMap(), execNameKey, "unknown"),
},
},
}, nil
@@ -241,8 +241,8 @@ func EventCatalogMetadata(datasetID *datacatalog.DatasetID, tag *datacatalog.Tag
if tag != nil {
md.ArtifactTag = &core.CatalogArtifactTag{
- ArtifactId: tag.ArtifactId,
- Name: tag.Name,
+ ArtifactId: tag.GetArtifactId(),
+ Name: tag.GetName(),
}
}
diff --git a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer_test.go b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer_test.go
index 92e4c82926..6fd6455e02 100644
--- a/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer_test.go
+++ b/flytepropeller/pkg/controller/nodes/catalog/datacatalog/transformer_test.go
@@ -32,8 +32,8 @@ func TestNilParamTask(t *testing.T) {
}
datasetID, err := GenerateDatasetIDForTask(context.TODO(), key)
assert.NoError(t, err)
- assert.NotEmpty(t, datasetID.Version)
- assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", datasetID.Version)
+ assert.NotEmpty(t, datasetID.GetVersion())
+ assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", datasetID.GetVersion())
}
// Ensure that empty parameters generate the same dataset as nil parameters
@@ -53,8 +53,8 @@ func TestEmptyParamTask(t *testing.T) {
}
datasetID, err := GenerateDatasetIDForTask(context.TODO(), key)
assert.NoError(t, err)
- assert.NotEmpty(t, datasetID.Version)
- assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", datasetID.Version)
+ assert.NotEmpty(t, datasetID.GetVersion())
+ assert.Equal(t, "1.0.0-GKw-c0Pw-GKw-c0Pw", datasetID.GetVersion())
key.TypedInterface.Inputs = nil
key.TypedInterface.Outputs = nil
@@ -84,8 +84,8 @@ func TestVariableMapOrder(t *testing.T) {
}
datasetID, err := GenerateDatasetIDForTask(context.TODO(), key)
assert.NoError(t, err)
- assert.NotEmpty(t, datasetID.Version)
- assert.Equal(t, "1.0.0-UxVtPm0k-GKw-c0Pw", datasetID.Version)
+ assert.NotEmpty(t, datasetID.GetVersion())
+ assert.Equal(t, "1.0.0-UxVtPm0k-GKw-c0Pw", datasetID.GetVersion())
key.TypedInterface.Inputs = &core.VariableMap{
Variables: map[string]*core.Variable{
@@ -96,7 +96,7 @@ func TestVariableMapOrder(t *testing.T) {
datasetIDDupe, err := GenerateDatasetIDForTask(context.TODO(), key)
assert.NoError(t, err)
- assert.Equal(t, "1.0.0-UxVtPm0k-GKw-c0Pw", datasetIDDupe.Version)
+ assert.Equal(t, "1.0.0-UxVtPm0k-GKw-c0Pw", datasetIDDupe.GetVersion())
assert.Equal(t, datasetID.String(), datasetIDDupe.String())
}
@@ -173,17 +173,17 @@ func TestGetArtifactMetadataForSource(t *testing.T) {
}{
{"nil TaskExec", args{}, nil},
{"TaskExec", args{tID}, map[string]string{
- execTaskAttemptKey: strconv.Itoa(int(tID.RetryAttempt)),
- execProjectKey: tID.NodeExecutionId.ExecutionId.Project,
- execDomainKey: tID.NodeExecutionId.ExecutionId.Domain,
- execNodeIDKey: tID.NodeExecutionId.NodeId,
- execNameKey: tID.NodeExecutionId.ExecutionId.Name,
+ execTaskAttemptKey: strconv.Itoa(int(tID.GetRetryAttempt())),
+ execProjectKey: tID.GetNodeExecutionId().GetExecutionId().GetProject(),
+ execDomainKey: tID.GetNodeExecutionId().GetExecutionId().GetDomain(),
+ execNodeIDKey: tID.GetNodeExecutionId().GetNodeId(),
+ execNameKey: tID.GetNodeExecutionId().GetExecutionId().GetName(),
}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if got := GetArtifactMetadataForSource(tt.args.taskExecutionID); !reflect.DeepEqual(got.KeyMap, tt.want) {
- t.Errorf("GetMetadataForSource() = %v, want %v", got.KeyMap, tt.want)
+ if got := GetArtifactMetadataForSource(tt.args.taskExecutionID); !reflect.DeepEqual(got.GetKeyMap(), tt.want) {
+ t.Errorf("GetMetadataForSource() = %v, want %v", got.GetKeyMap(), tt.want)
}
})
}
@@ -247,13 +247,13 @@ func TestGetSourceFromMetadata(t *testing.T) {
RetryAttempt: 0,
}},
// In legacy only taskVersionKey is available
- {"legacy", args{datasetMd: GetDatasetMetadataForSource(&tID).KeyMap, currentID: currentTaskID}, &core.TaskExecutionIdentifier{
+ {"legacy", args{datasetMd: GetDatasetMetadataForSource(&tID).GetKeyMap(), currentID: currentTaskID}, &core.TaskExecutionIdentifier{
TaskId: &core.Identifier{
ResourceType: core.ResourceType_TASK,
Name: "x",
Project: "project",
Domain: "development",
- Version: tID.TaskId.Version,
+ Version: tID.GetTaskId().GetVersion(),
},
NodeExecutionId: &core.NodeExecutionIdentifier{
ExecutionId: &core.WorkflowExecutionIdentifier{
@@ -266,7 +266,7 @@ func TestGetSourceFromMetadata(t *testing.T) {
RetryAttempt: 0,
}},
// Completely available
- {"latest", args{datasetMd: GetDatasetMetadataForSource(&tID).KeyMap, artifactMd: GetArtifactMetadataForSource(&tID).KeyMap, currentID: currentTaskID}, &tID},
+ {"latest", args{datasetMd: GetDatasetMetadataForSource(&tID).GetKeyMap(), artifactMd: GetArtifactMetadataForSource(&tID).GetKeyMap(), currentID: currentTaskID}, &tID},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -325,11 +325,11 @@ func TestEventCatalogMetadata(t *testing.T) {
func TestDatasetIDToIdentifier(t *testing.T) {
id := DatasetIDToIdentifier(&datacatalog.DatasetID{Project: "p", Domain: "d", Name: "n", Version: "v"})
- assert.Equal(t, core.ResourceType_DATASET, id.ResourceType)
- assert.Equal(t, "n", id.Name)
- assert.Equal(t, "p", id.Project)
- assert.Equal(t, "d", id.Domain)
- assert.Equal(t, "v", id.Version)
+ assert.Equal(t, core.ResourceType_DATASET, id.GetResourceType())
+ assert.Equal(t, "n", id.GetName())
+ assert.Equal(t, "p", id.GetProject())
+ assert.Equal(t, "d", id.GetDomain())
+ assert.Equal(t, "v", id.GetVersion())
}
func TestGenerateTaskOutputsFromArtifact_IDLNotFound(t *testing.T) {
diff --git a/flytepropeller/pkg/controller/nodes/common/utils.go b/flytepropeller/pkg/controller/nodes/common/utils.go
index dd16b53f3a..839be0c99f 100644
--- a/flytepropeller/pkg/controller/nodes/common/utils.go
+++ b/flytepropeller/pkg/controller/nodes/common/utils.go
@@ -76,7 +76,7 @@ func GetTargetEntity(ctx context.Context, nCtx interfaces.NodeExecutionContext)
// This doesn't feed a very important part of the node execution event, swallow it for now.
logger.Errorf(ctx, "Failed to get task [%v] with error [%v]", taskID, err)
}
- targetEntity = taskID.CoreTask().Id
+ targetEntity = taskID.CoreTask().GetId()
}
return targetEntity
}
@@ -111,7 +111,7 @@ func OffloadLargeLiteral(ctx context.Context, datastore *storage.DataStore, data
if literalSizeMB >= literalOffloadingConfig.MaxSizeInMBForOffloading {
errString := fmt.Sprintf("Literal size [%d] MB is larger than the max size [%d] MB for offloading", literalSizeMB, literalOffloadingConfig.MaxSizeInMBForOffloading)
logger.Errorf(ctx, errString)
- return fmt.Errorf(errString)
+ return fmt.Errorf(errString) //nolint:govet,staticcheck
}
if literalSizeMB < literalOffloadingConfig.MinSizeInMBForOffloading {
logger.Debugf(ctx, "Literal size [%d] MB is smaller than the min size [%d] MB for offloading", literalSizeMB, literalOffloadingConfig.MinSizeInMBForOffloading)
@@ -122,7 +122,7 @@ func OffloadLargeLiteral(ctx context.Context, datastore *storage.DataStore, data
if inferredType == nil {
errString := "Failed to determine literal type for offloaded literal"
logger.Errorf(ctx, errString)
- return fmt.Errorf(errString)
+ return fmt.Errorf(errString) //nolint:govet,staticcheck
}
// offload the literal
@@ -145,7 +145,7 @@ func OffloadLargeLiteral(ctx context.Context, datastore *storage.DataStore, data
toBeOffloaded.Value = &idlcore.Literal_OffloadedMetadata{
OffloadedMetadata: &idlcore.LiteralOffloadedMetadata{
Uri: dataReference.String(),
- SizeBytes: uint64(literalSizeBytes),
+ SizeBytes: uint64(literalSizeBytes), // #nosec G115
InferredType: inferredType,
},
}
diff --git a/flytepropeller/pkg/controller/nodes/common/utils_test.go b/flytepropeller/pkg/controller/nodes/common/utils_test.go
index 875ede858b..bde50c8040 100644
--- a/flytepropeller/pkg/controller/nodes/common/utils_test.go
+++ b/flytepropeller/pkg/controller/nodes/common/utils_test.go
@@ -147,8 +147,8 @@ func TestOffloadLargeLiteral(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "foo/bar", toBeOffloaded.GetOffloadedMetadata().GetUri())
assert.Equal(t, uint64(6), toBeOffloaded.GetOffloadedMetadata().GetSizeBytes())
- assert.Equal(t, inferredType.GetSimple(), toBeOffloaded.GetOffloadedMetadata().InferredType.GetSimple())
- assert.Equal(t, base64.RawURLEncoding.EncodeToString(expectedLiteralDigest), toBeOffloaded.Hash)
+ assert.Equal(t, inferredType.GetSimple(), toBeOffloaded.GetOffloadedMetadata().GetInferredType().GetSimple())
+ assert.Equal(t, base64.RawURLEncoding.EncodeToString(expectedLiteralDigest), toBeOffloaded.GetHash())
})
t.Run("offload successful with valid size and hash passed in", func(t *testing.T) {
@@ -175,7 +175,7 @@ func TestOffloadLargeLiteral(t *testing.T) {
}
err := OffloadLargeLiteral(ctx, datastore, dataReference, toBeOffloaded, literalOffloadingConfig)
assert.NoError(t, err)
- assert.Equal(t, "hash", toBeOffloaded.Hash)
+ assert.Equal(t, "hash", toBeOffloaded.GetHash())
})
t.Run("offload fails with size larger than max", func(t *testing.T) {
diff --git a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow.go b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow.go
index 95e8f4c0bb..de0108d4dc 100644
--- a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow.go
+++ b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow.go
@@ -46,8 +46,8 @@ func setEphemeralNodeExecutionStatusAttributes(ctx context.Context, djSpec *core
// We keep track of the original node ids because that's where flytekit inputs are written to in the case of legacy
// map tasks. The modern map tasks do not write input files any longer and this entire piece of code can be removed.
parentNodeID := nCtx.NodeID()
- for _, node := range djSpec.Nodes {
- nodeID := node.Id
+ for _, node := range djSpec.GetNodes() {
+ nodeID := node.GetId()
var subNodeStatus v1alpha1.ExecutableNodeStatus
newID, err := hierarchicalNodeID(parentNodeID, currentAttemptStr, nodeID)
if err != nil {
@@ -98,16 +98,16 @@ func (d dynamicNodeTaskNodeHandler) buildDynamicWorkflowTemplate(ctx context.Con
return nil, errors.Wrapf("TaskReadFailed", err, "Failed to find task [%v].", nCtx.TaskReader().GetTaskID())
}
- for _, t := range djSpec.Tasks {
+ for _, t := range djSpec.GetTasks() {
if t.GetContainer() != nil && parentTask.GetContainer() != nil {
- t.GetContainer().Config = append(t.GetContainer().Config, parentTask.GetContainer().Config...)
+ t.GetContainer().Config = append(t.GetContainer().Config, parentTask.GetContainer().GetConfig()...)
}
}
}
if nCtx.ExecutionContext().GetEventVersion() == v1alpha1.EventVersion0 {
- for _, o := range djSpec.Outputs {
- err = updateBindingNodeIDsWithLineage(parentNodeID, currentAttemptStr, o.Binding)
+ for _, o := range djSpec.GetOutputs() {
+ err = updateBindingNodeIDsWithLineage(parentNodeID, currentAttemptStr, o.GetBinding())
if err != nil {
return nil, err
}
@@ -115,14 +115,14 @@ func (d dynamicNodeTaskNodeHandler) buildDynamicWorkflowTemplate(ctx context.Con
}
return &core.WorkflowTemplate{
Id: &core.Identifier{
- Project: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId().Project,
- Domain: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId().Domain,
- Name: fmt.Sprintf(dynamicWfNameTemplate, nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId),
+ Project: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId().GetProject(),
+ Domain: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId().GetDomain(),
+ Name: fmt.Sprintf(dynamicWfNameTemplate, nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId()),
Version: rand.String(10),
ResourceType: core.ResourceType_WORKFLOW,
},
- Nodes: djSpec.Nodes,
- Outputs: djSpec.Outputs,
+ Nodes: djSpec.GetNodes(),
+ Outputs: djSpec.GetOutputs(),
Interface: iface,
}, nil
}
@@ -228,14 +228,14 @@ func (d dynamicNodeTaskNodeHandler) buildDynamicWorkflow(ctx context.Context, nC
return nil, nil, errors.Wrapf(utils.ErrorCodeSystem, err, "failed to build dynamic workflow template")
}
- compiledTasks, err := compileTasks(ctx, djSpec.Tasks)
+ compiledTasks, err := compileTasks(ctx, djSpec.GetTasks())
if err != nil {
return nil, nil, errors.Wrapf(utils.ErrorCodeUser, err, "failed to compile dynamic tasks")
}
// Get the requirements, that is, a list of all the task IDs and the launch plan IDs that will be called as part of this dynamic task.
// The definition of these will need to be fetched from Admin (in order to get the interface).
- requirements, err := compiler.GetRequirements(wf, djSpec.Subworkflows)
+ requirements, err := compiler.GetRequirements(wf, djSpec.GetSubworkflows())
if err != nil {
return nil, nil, errors.Wrapf(utils.ErrorCodeUser, err, "failed to Get requirements for subworkflows")
}
@@ -251,7 +251,7 @@ func (d dynamicNodeTaskNodeHandler) buildDynamicWorkflow(ctx context.Context, nC
// See https://github.com/flyteorg/flyte/issues/219 for more information.
var closure *core.CompiledWorkflowClosure
- closure, err = compiler.CompileWorkflow(wf, djSpec.Subworkflows, compiledTasks, launchPlanInterfaces)
+ closure, err = compiler.CompileWorkflow(wf, djSpec.GetSubworkflows(), compiledTasks, launchPlanInterfaces)
if err != nil {
return nil, nil, errors.Wrapf(utils.ErrorCodeUser, err, "malformed dynamic workflow")
}
@@ -348,10 +348,10 @@ func (d dynamicNodeTaskNodeHandler) getLaunchPlanInterfaces(ctx context.Context,
logger.Debugf(ctx, "Error fetching launch plan definition from admin")
if launchplan.IsNotFound(err) || launchplan.IsUserError(err) {
return nil, errors.Wrapf(utils.ErrorCodeUser, err, "incorrectly specified launchplan %s:%s:%s:%s",
- id.Project, id.Domain, id.Name, id.Version)
+ id.GetProject(), id.GetDomain(), id.GetName(), id.GetVersion())
}
return nil, errors.Wrapf(utils.ErrorCodeSystem, err, "unable to retrieve launchplan information %s:%s:%s:%s",
- id.Project, id.Domain, id.Name, id.Version)
+ id.GetProject(), id.GetDomain(), id.GetName(), id.GetVersion())
}
launchPlanInterfaces[idx] = compiler.NewLaunchPlanInterfaceProvider(lp)
}
diff --git a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go
index 3cb27dd65f..ec20c14cd0 100644
--- a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go
+++ b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go
@@ -532,7 +532,7 @@ func Test_dynamicNodeHandler_buildContextualDynamicWorkflow_withLaunchPlans(t *t
}
mockLPLauncher := &mocks5.Reader{}
mockLPLauncher.OnGetLaunchPlanMatch(mock.Anything, mock.MatchedBy(func(id *core.Identifier) bool {
- return lpID.Name == id.Name && lpID.Domain == id.Domain && lpID.Project == id.Project && lpID.ResourceType == id.ResourceType
+ return lpID.GetName() == id.GetName() && lpID.GetDomain() == id.GetDomain() && lpID.GetProject() == id.GetProject() && lpID.GetResourceType() == id.GetResourceType()
})).Return(&admin.LaunchPlan{
Id: lpID,
Closure: &admin.LaunchPlanClosure{
diff --git a/flytepropeller/pkg/controller/nodes/dynamic/handler.go b/flytepropeller/pkg/controller/nodes/dynamic/handler.go
index e23f145bb3..d8977eacdb 100644
--- a/flytepropeller/pkg/controller/nodes/dynamic/handler.go
+++ b/flytepropeller/pkg/controller/nodes/dynamic/handler.go
@@ -103,9 +103,9 @@ func (d dynamicNodeTaskNodeHandler) produceDynamicWorkflow(ctx context.Context,
return handler.Transition{}, handler.DynamicNodeState{}, err
}
taskNodeInfoMetadata := &event.TaskNodeMetadata{}
- if dCtx.subWorkflowClosure != nil && dCtx.subWorkflowClosure.Primary != nil && dCtx.subWorkflowClosure.Primary.Template != nil {
+ if dCtx.subWorkflowClosure != nil && dCtx.subWorkflowClosure.GetPrimary() != nil && dCtx.subWorkflowClosure.GetPrimary().GetTemplate() != nil {
taskNodeInfoMetadata.DynamicWorkflow = &event.DynamicWorkflowNodeMetadata{
- Id: dCtx.subWorkflowClosure.Primary.Template.Id,
+ Id: dCtx.subWorkflowClosure.GetPrimary().GetTemplate().GetId(),
CompiledWorkflow: dCtx.subWorkflowClosure,
DynamicJobSpecUri: dCtx.dynamicJobSpecURI,
}
diff --git a/flytepropeller/pkg/controller/nodes/dynamic/utils.go b/flytepropeller/pkg/controller/nodes/dynamic/utils.go
index 690cbe06a1..6a75e551c7 100644
--- a/flytepropeller/pkg/controller/nodes/dynamic/utils.go
+++ b/flytepropeller/pkg/controller/nodes/dynamic/utils.go
@@ -21,7 +21,7 @@ func underlyingInterface(ctx context.Context, taskReader interfaces.TaskReader)
}
if t.GetInterface() != nil {
- iface.Outputs = t.GetInterface().Outputs
+ iface.Outputs = t.GetInterface().GetOutputs()
}
return iface, nil
}
@@ -31,21 +31,21 @@ func hierarchicalNodeID(parentNodeID, retryAttempt, nodeID string) (string, erro
}
func updateBindingNodeIDsWithLineage(parentNodeID, retryAttempt string, binding *core.BindingData) (err error) {
- switch b := binding.Value.(type) {
+ switch b := binding.GetValue().(type) {
case *core.BindingData_Promise:
- b.Promise.NodeId, err = hierarchicalNodeID(parentNodeID, retryAttempt, b.Promise.NodeId)
+ b.Promise.NodeId, err = hierarchicalNodeID(parentNodeID, retryAttempt, b.Promise.GetNodeId())
if err != nil {
return err
}
case *core.BindingData_Collection:
- for _, item := range b.Collection.Bindings {
+ for _, item := range b.Collection.GetBindings() {
err = updateBindingNodeIDsWithLineage(parentNodeID, retryAttempt, item)
if err != nil {
return err
}
}
case *core.BindingData_Map:
- for _, item := range b.Map.Bindings {
+ for _, item := range b.Map.GetBindings() {
err = updateBindingNodeIDsWithLineage(parentNodeID, retryAttempt, item)
if err != nil {
return err
@@ -60,7 +60,7 @@ func compileTasks(_ context.Context, tasks []*core.TaskTemplate) ([]*core.Compil
compiledTasks := make([]*core.CompiledTask, 0, len(tasks))
visitedTasks := sets.NewString()
for _, t := range tasks {
- if visitedTasks.Has(t.Id.String()) {
+ if visitedTasks.Has(t.GetId().String()) {
continue
}
@@ -70,7 +70,7 @@ func compileTasks(_ context.Context, tasks []*core.TaskTemplate) ([]*core.Compil
}
compiledTasks = append(compiledTasks, ct)
- visitedTasks.Insert(t.Id.String())
+ visitedTasks.Insert(t.GetId().String())
}
return compiledTasks, nil
diff --git a/flytepropeller/pkg/controller/nodes/dynamic/utils_test.go b/flytepropeller/pkg/controller/nodes/dynamic/utils_test.go
index 6afdd487b9..ecc1904c49 100644
--- a/flytepropeller/pkg/controller/nodes/dynamic/utils_test.go
+++ b/flytepropeller/pkg/controller/nodes/dynamic/utils_test.go
@@ -62,5 +62,5 @@ func TestUnderlyingInterface(t *testing.T) {
iface, err = underlyingInterface(context.TODO(), tr)
assert.NoError(t, err)
assert.NotNil(t, iface)
- assert.Nil(t, iface.Outputs)
+ assert.Nil(t, iface.GetOutputs())
}
diff --git a/flytepropeller/pkg/controller/nodes/executor.go b/flytepropeller/pkg/controller/nodes/executor.go
index b25ad64fb6..04adfc5d54 100644
--- a/flytepropeller/pkg/controller/nodes/executor.go
+++ b/flytepropeller/pkg/controller/nodes/executor.go
@@ -522,19 +522,19 @@ func (c *nodeExecutor) RecordTransitionLatency(ctx context.Context, dag executor
func (c *nodeExecutor) recoverInputs(ctx context.Context, nCtx interfaces.NodeExecutionContext,
recovered *admin.NodeExecution, recoveredData *admin.NodeExecutionGetDataResponse) (*core.LiteralMap, error) {
- nodeInputs := recoveredData.FullInputs
+ nodeInputs := recoveredData.GetFullInputs()
if nodeInputs != nil {
if err := c.store.WriteProtobuf(ctx, nCtx.InputReader().GetInputPath(), storage.Options{}, nodeInputs); err != nil {
c.metrics.InputsWriteFailure.Inc(ctx)
logger.Errorf(ctx, "Failed to move recovered inputs for Node. Error [%v]. InputsFile [%s]", err, nCtx.InputReader().GetInputPath())
return nil, errors.Wrapf(errors.StorageError, nCtx.NodeID(), err, "Failed to store inputs for Node. InputsFile [%s]", nCtx.InputReader().GetInputPath())
}
- } else if len(recovered.InputUri) > 0 {
+ } else if len(recovered.GetInputUri()) > 0 {
// If the inputs are too large they won't be returned inline in the RecoverData call. We must fetch them before copying them.
nodeInputs = &core.LiteralMap{}
- if recoveredData.FullInputs == nil {
- if err := c.store.ReadProtobuf(ctx, storage.DataReference(recovered.InputUri), nodeInputs); err != nil {
- return nil, errors.Wrapf(errors.InputsNotFoundError, nCtx.NodeID(), err, "failed to read data from dataDir [%v].", recovered.InputUri)
+ if recoveredData.GetFullInputs() == nil {
+ if err := c.store.ReadProtobuf(ctx, storage.DataReference(recovered.GetInputUri()), nodeInputs); err != nil {
+ return nil, errors.Wrapf(errors.InputsNotFoundError, nCtx.NodeID(), err, "failed to read data from dataDir [%v].", recovered.GetInputUri())
}
}
@@ -549,11 +549,11 @@ func (c *nodeExecutor) recoverInputs(ctx context.Context, nCtx interfaces.NodeEx
}
func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.NodeExecutionContext) (handler.PhaseInfo, error) {
- fullyQualifiedNodeID := nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId
+ fullyQualifiedNodeID := nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId()
if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 {
// compute fully qualified node id (prefixed with parent id and retry attempt) to ensure uniqueness
var err error
- fullyQualifiedNodeID, err = common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId)
+ fullyQualifiedNodeID, err = common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId())
if err != nil {
return handler.PhaseInfoUndefined, err
}
@@ -572,13 +572,13 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node
logger.Warnf(ctx, "call to recover node [%+v] returned no error but also no node", nCtx.NodeExecutionMetadata().GetNodeExecutionID())
return handler.PhaseInfoUndefined, nil
}
- if recovered.Closure == nil {
+ if recovered.GetClosure() == nil {
logger.Warnf(ctx, "Fetched node execution [%+v] data but was missing closure. Will not attempt to recover",
nCtx.NodeExecutionMetadata().GetNodeExecutionID())
return handler.PhaseInfoUndefined, nil
}
// A recoverable node execution should always be in a terminal phase
- switch recovered.Closure.Phase {
+ switch recovered.GetClosure().GetPhase() {
case core.NodeExecution_SKIPPED:
return handler.PhaseInfoUndefined, nil
case core.NodeExecution_SUCCEEDED:
@@ -588,9 +588,9 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node
default:
// The node execution may be partially recoverable through intra task checkpointing. Save the checkpoint
// uri in the task node state to pass to the task handler later on.
- if metadata, ok := recovered.Closure.TargetMetadata.(*admin.NodeExecutionClosure_TaskNodeMetadata); ok {
+ if metadata, ok := recovered.GetClosure().GetTargetMetadata().(*admin.NodeExecutionClosure_TaskNodeMetadata); ok {
state := nCtx.NodeStateReader().GetTaskNodeState()
- state.PreviousNodeExecutionCheckpointURI = storage.DataReference(metadata.TaskNodeMetadata.CheckpointUri)
+ state.PreviousNodeExecutionCheckpointURI = storage.DataReference(metadata.TaskNodeMetadata.GetCheckpointUri())
err = nCtx.NodeStateWriter().PutTaskNodeState(state)
if err != nil {
logger.Warnf(ctx, "failed to save recovered checkpoint uri for [%+v]: [%+v]",
@@ -601,7 +601,7 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node
// if this node is a dynamic task we attempt to recover the compiled workflow from instances where the parent
// task succeeded but the dynamic task did not complete. this is important to ensure correctness since node ids
// within the compiled closure may not be generated deterministically.
- if recovered.Metadata != nil && recovered.Metadata.IsDynamic && len(recovered.Closure.DynamicJobSpecUri) > 0 {
+ if recovered.GetMetadata() != nil && recovered.GetMetadata().GetIsDynamic() && len(recovered.GetClosure().GetDynamicJobSpecUri()) > 0 {
// recover node inputs
recoveredData, err := c.recoveryClient.RecoverNodeExecutionData(ctx,
nCtx.ExecutionContext().GetExecutionConfig().RecoveryExecution.WorkflowExecutionIdentifier, fullyQualifiedNodeID)
@@ -619,7 +619,7 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node
return handler.PhaseInfoUndefined, err
}
- dynamicJobSpecReference := storage.DataReference(recovered.Closure.DynamicJobSpecUri)
+ dynamicJobSpecReference := storage.DataReference(recovered.GetClosure().GetDynamicJobSpecUri())
if err := nCtx.DataStore().CopyRaw(ctx, dynamicJobSpecReference, f.GetLoc(), storage.Options{}); err != nil {
return handler.PhaseInfoUndefined, errors.Wrapf(errors.StorageError, nCtx.NodeID(), err,
"failed to store dynamic job spec for node. source file [%s] destination file [%s]", dynamicJobSpecReference, f.GetLoc())
@@ -635,7 +635,7 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node
return handler.PhaseInfoRunning(&handler.ExecutionInfo{}), nil
}
- logger.Debugf(ctx, "Node [%+v] phase [%v] is not recoverable", nCtx.NodeExecutionMetadata().GetNodeExecutionID(), recovered.Closure.Phase)
+ logger.Debugf(ctx, "Node [%+v] phase [%v] is not recoverable", nCtx.NodeExecutionMetadata().GetNodeExecutionID(), recovered.GetClosure().GetPhase())
return handler.PhaseInfoUndefined, nil
}
@@ -662,13 +662,13 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node
// Similarly, copy outputs' reference
so := storage.Options{}
var outputs = &core.LiteralMap{}
- if recoveredData.FullOutputs != nil {
- outputs = recoveredData.FullOutputs
- } else if recovered.Closure.GetOutputData() != nil {
- outputs = recovered.Closure.GetOutputData()
- } else if len(recovered.Closure.GetOutputUri()) > 0 {
- if err := c.store.ReadProtobuf(ctx, storage.DataReference(recovered.Closure.GetOutputUri()), outputs); err != nil {
- return handler.PhaseInfoUndefined, errors.Wrapf(errors.InputsNotFoundError, nCtx.NodeID(), err, "failed to read output data [%v].", recovered.Closure.GetOutputUri())
+ if recoveredData.GetFullOutputs() != nil {
+ outputs = recoveredData.GetFullOutputs()
+ } else if recovered.GetClosure().GetOutputData() != nil {
+ outputs = recovered.GetClosure().GetOutputData()
+ } else if len(recovered.GetClosure().GetOutputUri()) > 0 {
+ if err := c.store.ReadProtobuf(ctx, storage.DataReference(recovered.GetClosure().GetOutputUri()), outputs); err != nil {
+ return handler.PhaseInfoUndefined, errors.Wrapf(errors.InputsNotFoundError, nCtx.NodeID(), err, "failed to read output data [%v].", recovered.GetClosure().GetOutputUri())
}
} else {
logger.Debugf(ctx, "No outputs found for recovered node [%+v]", nCtx.NodeExecutionMetadata().GetNodeExecutionID())
@@ -679,7 +679,7 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node
OutputURI: outputFile,
}
- deckFile := storage.DataReference(recovered.Closure.GetDeckUri())
+ deckFile := storage.DataReference(recovered.GetClosure().GetDeckUri())
if len(deckFile) > 0 {
metadata, err := nCtx.DataStore().Head(ctx, deckFile)
if err != nil {
@@ -702,24 +702,24 @@ func (c *nodeExecutor) attemptRecovery(ctx context.Context, nCtx interfaces.Node
OutputInfo: oi,
}
- if recovered.Closure.GetTaskNodeMetadata() != nil {
+ if recovered.GetClosure().GetTaskNodeMetadata() != nil {
taskNodeInfo := &handler.TaskNodeInfo{
TaskNodeMetadata: &event.TaskNodeMetadata{
- CatalogKey: recovered.Closure.GetTaskNodeMetadata().CatalogKey,
- CacheStatus: recovered.Closure.GetTaskNodeMetadata().CacheStatus,
+ CatalogKey: recovered.GetClosure().GetTaskNodeMetadata().GetCatalogKey(),
+ CacheStatus: recovered.GetClosure().GetTaskNodeMetadata().GetCacheStatus(),
},
}
- if recoveredData.DynamicWorkflow != nil {
+ if recoveredData.GetDynamicWorkflow() != nil {
taskNodeInfo.TaskNodeMetadata.DynamicWorkflow = &event.DynamicWorkflowNodeMetadata{
- Id: recoveredData.DynamicWorkflow.Id,
- CompiledWorkflow: recoveredData.DynamicWorkflow.CompiledWorkflow,
+ Id: recoveredData.GetDynamicWorkflow().GetId(),
+ CompiledWorkflow: recoveredData.GetDynamicWorkflow().GetCompiledWorkflow(),
}
}
info.TaskNodeInfo = taskNodeInfo
- } else if recovered.Closure.GetWorkflowNodeMetadata() != nil {
+ } else if recovered.GetClosure().GetWorkflowNodeMetadata() != nil {
logger.Warnf(ctx, "Attempted to recover node")
info.WorkflowNodeInfo = &handler.WorkflowNodeInfo{
- LaunchedWorkflowID: recovered.Closure.GetWorkflowNodeMetadata().ExecutionId,
+ LaunchedWorkflowID: recovered.GetClosure().GetWorkflowNodeMetadata().GetExecutionId(),
}
}
return handler.PhaseInfoRecovered(info), nil
@@ -765,7 +765,7 @@ func (c *nodeExecutor) preExecute(ctx context.Context, dag executors.DAGStructur
}
if nodeInputs != nil {
- p := common.CheckOffloadingCompat(ctx, nCtx, nodeInputs.Literals, node, c.literalOffloadingConfig)
+ p := common.CheckOffloadingCompat(ctx, nCtx, nodeInputs.GetLiterals(), node, c.literalOffloadingConfig)
if p != nil {
return *p, nil
}
@@ -809,7 +809,7 @@ func (c *nodeExecutor) isEligibleForRetry(nCtx interfaces.NodeExecutionContext,
if config.GetConfig().NodeConfig.IgnoreRetryCause {
currentAttempt = nodeStatus.GetAttempts() + 1
} else {
- if err.Kind == core.ExecutionError_SYSTEM {
+ if err.GetKind() == core.ExecutionError_SYSTEM {
currentAttempt = nodeStatus.GetSystemFailures()
maxAttempts = c.maxNodeRetriesForSystemFailures
isEligible = currentAttempt < c.maxNodeRetriesForSystemFailures
@@ -818,9 +818,9 @@ func (c *nodeExecutor) isEligibleForRetry(nCtx interfaces.NodeExecutionContext,
currentAttempt = (nodeStatus.GetAttempts() + 1) - nodeStatus.GetSystemFailures()
}
- maxAttempts = uint32(config.GetConfig().NodeConfig.DefaultMaxAttempts)
+ maxAttempts = uint32(config.GetConfig().NodeConfig.DefaultMaxAttempts) // #nosec G115
if nCtx.Node().GetRetryStrategy() != nil && nCtx.Node().GetRetryStrategy().MinAttempts != nil && *nCtx.Node().GetRetryStrategy().MinAttempts != 1 {
- maxAttempts = uint32(*nCtx.Node().GetRetryStrategy().MinAttempts)
+ maxAttempts = uint32(*nCtx.Node().GetRetryStrategy().MinAttempts) // #nosec G115
}
isEligible = currentAttempt < maxAttempts
return
@@ -864,8 +864,8 @@ func (c *nodeExecutor) execute(ctx context.Context, h interfaces.NodeHandler, nC
if !isEligible {
return handler.PhaseInfoFailure(
core.ExecutionError_USER,
- fmt.Sprintf("RetriesExhausted|%s", phase.GetErr().Code),
- fmt.Sprintf("[%d/%d] currentAttempt done. Last Error: %s::%s", currentAttempt, maxAttempts, phase.GetErr().Kind.String(), phase.GetErr().Message),
+ fmt.Sprintf("RetriesExhausted|%s", phase.GetErr().GetCode()),
+ fmt.Sprintf("[%d/%d] currentAttempt done. Last Error: %s::%s", currentAttempt, maxAttempts, phase.GetErr().GetKind().String(), phase.GetErr().GetMessage()),
phase.GetInfo(),
), nil
}
@@ -894,11 +894,11 @@ func (c *nodeExecutor) Abort(ctx context.Context, h interfaces.NodeHandler, nCtx
// only send event if this is the final transition for this node
if finalTransition {
nodeExecutionID := &core.NodeExecutionIdentifier{
- ExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().ExecutionId,
- NodeId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId,
+ ExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId(),
+ NodeId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId(),
}
if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 {
- currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nodeExecutionID.NodeId)
+ currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nodeExecutionID.GetNodeId())
if err != nil {
return err
}
@@ -1483,7 +1483,7 @@ func NewExecutor(ctx context.Context, nodeConfig config.NodeConfig, store *stora
eventConfig: eventConfig,
literalOffloadingConfig: literalOffloadingConfig,
interruptibleFailureThreshold: nodeConfig.InterruptibleFailureThreshold,
- maxNodeRetriesForSystemFailures: uint32(nodeConfig.MaxNodeRetriesOnSystemFailures),
+ maxNodeRetriesForSystemFailures: uint32(nodeConfig.MaxNodeRetriesOnSystemFailures), // #nosec G115
metrics: metrics,
nodeRecorder: events.NewNodeEventRecorder(eventSink, nodeScope, store),
outputResolver: NewRemoteFileOutputResolver(store),
diff --git a/flytepropeller/pkg/controller/nodes/executor_test.go b/flytepropeller/pkg/controller/nodes/executor_test.go
index 35ab105623..329d52540d 100644
--- a/flytepropeller/pkg/controller/nodes/executor_test.go
+++ b/flytepropeller/pkg/controller/nodes/executor_test.go
@@ -779,7 +779,7 @@ func TestNodeExecutor_RecursiveNodeHandler_Recurse(t *testing.T) {
evRecorder := &eventMocks.NodeEventRecorder{}
evRecorder.OnRecordNodeEventMatch(mock.Anything, mock.MatchedBy(func(ev *event.NodeExecutionEvent) bool {
assert.NotNil(t, ev)
- assert.Equal(t, test.eventPhase, ev.Phase)
+ assert.Equal(t, test.eventPhase, ev.GetPhase())
called = true
return true
}), mock.Anything).Return(nil)
@@ -893,7 +893,7 @@ func TestNodeExecutor_RecursiveNodeHandler_Recurse(t *testing.T) {
evRecorder := &eventMocks.NodeEventRecorder{}
evRecorder.OnRecordNodeEventMatch(mock.Anything, mock.MatchedBy(func(ev *event.NodeExecutionEvent) bool {
assert.NotNil(t, ev)
- assert.Equal(t, test.eventPhase, ev.Phase)
+ assert.Equal(t, test.eventPhase, ev.GetPhase())
called = true
return true
}), mock.Anything).Return(nil)
@@ -939,7 +939,7 @@ func TestNodeExecutor_RecursiveNodeHandler_Recurse(t *testing.T) {
} else {
assert.Nil(t, s.Err)
}
- assert.Equal(t, uint32(test.attempts), mockNodeStatus.GetAttempts())
+ assert.Equal(t, uint32(test.attempts), mockNodeStatus.GetAttempts()) // #nosec G115
assert.Equal(t, test.eventRecorded, called, "event recording expected: %v, but got %v", test.eventRecorded, called)
})
}
@@ -1770,18 +1770,18 @@ func TestNodeExecutionEventStartNode(t *testing.T) {
}, subWfID)
assert.NoError(t, err)
- assert.Equal(t, "start-node", ev.Id.NodeId)
- assert.Equal(t, execID, ev.Id.ExecutionId)
- assert.Empty(t, ev.SpecNodeId)
- assert.Nil(t, ev.ParentNodeMetadata)
- assert.Equal(t, tID, ev.ParentTaskMetadata.Id)
- assert.Empty(t, ev.NodeName)
- assert.Empty(t, ev.RetryGroup)
+ assert.Equal(t, "start-node", ev.GetId().GetNodeId())
+ assert.Equal(t, execID, ev.GetId().GetExecutionId())
+ assert.Empty(t, ev.GetSpecNodeId())
+ assert.Nil(t, ev.GetParentNodeMetadata())
+ assert.Equal(t, tID, ev.GetParentTaskMetadata().GetId())
+ assert.Empty(t, ev.GetNodeName())
+ assert.Empty(t, ev.GetRetryGroup())
assert.Equal(t, "dummy://dummyOutUrl/outputs.pb",
- ev.OutputResult.(*event.NodeExecutionEvent_OutputUri).OutputUri)
- assert.Equal(t, ev.ProducerId, testClusterID)
+ ev.GetOutputResult().(*event.NodeExecutionEvent_OutputUri).OutputUri)
+ assert.Equal(t, ev.GetProducerId(), testClusterID)
assert.Equal(t, subWfID, ev.GetTargetEntity())
- assert.Nil(t, ev.InputValue)
+ assert.Nil(t, ev.GetInputValue())
}
func TestNodeExecutionEventV0(t *testing.T) {
@@ -1817,14 +1817,14 @@ func TestNodeExecutionEventV0(t *testing.T) {
RawOutputPolicy: config.RawOutputPolicyReference,
}, nil)
assert.NoError(t, err)
- assert.Equal(t, "n1", ev.Id.NodeId)
- assert.Equal(t, execID, ev.Id.ExecutionId)
- assert.Empty(t, ev.SpecNodeId)
- assert.Nil(t, ev.ParentNodeMetadata)
- assert.Equal(t, tID, ev.ParentTaskMetadata.Id)
- assert.Empty(t, ev.NodeName)
- assert.Empty(t, ev.RetryGroup)
- assert.Empty(t, ev.TargetEntity)
+ assert.Equal(t, "n1", ev.GetId().GetNodeId())
+ assert.Equal(t, execID, ev.GetId().GetExecutionId())
+ assert.Empty(t, ev.GetSpecNodeId())
+ assert.Nil(t, ev.GetParentNodeMetadata())
+ assert.Equal(t, tID, ev.GetParentTaskMetadata().GetId())
+ assert.Empty(t, ev.GetNodeName())
+ assert.Empty(t, ev.GetRetryGroup())
+ assert.Empty(t, ev.GetTargetEntity())
assert.Equal(t, "reference", ev.GetInputUri())
}
@@ -1870,18 +1870,18 @@ func TestNodeExecutionEventV1(t *testing.T) {
}, nil)
assert.NoError(t, err)
- assert.Equal(t, "np1-2-n1", eventOpt.Id.NodeId)
- assert.Equal(t, execID, eventOpt.Id.ExecutionId)
- assert.Equal(t, "id", eventOpt.SpecNodeId)
+ assert.Equal(t, "np1-2-n1", eventOpt.GetId().GetNodeId())
+ assert.Equal(t, execID, eventOpt.GetId().GetExecutionId())
+ assert.Equal(t, "id", eventOpt.GetSpecNodeId())
expectParentMetadata := event.ParentNodeExecutionMetadata{
NodeId: "np1",
}
- assert.Equal(t, expectParentMetadata, *eventOpt.ParentNodeMetadata)
- assert.Nil(t, eventOpt.ParentTaskMetadata)
- assert.Equal(t, "name", eventOpt.NodeName)
- assert.Equal(t, "2", eventOpt.RetryGroup)
+ assert.True(t, proto.Equal(&expectParentMetadata, eventOpt.GetParentNodeMetadata()))
+ assert.Nil(t, eventOpt.GetParentTaskMetadata())
+ assert.Equal(t, "name", eventOpt.GetNodeName())
+ assert.Equal(t, "2", eventOpt.GetRetryGroup())
assert.True(t, proto.Equal(eventOpt.GetInputData(), inputs))
- assert.Empty(t, eventOpt.TargetEntity)
+ assert.Empty(t, eventOpt.GetTargetEntity())
assert.Equal(t, inputs, eventOpt.GetInputData())
}
@@ -2326,8 +2326,8 @@ func TestRecover(t *testing.T) {
},
CacheStatus: core.CatalogCacheStatus_CACHE_HIT,
DynamicWorkflow: &event.DynamicWorkflowNodeMetadata{
- Id: dynamicWorkflow.Id,
- CompiledWorkflow: dynamicWorkflow.CompiledWorkflow,
+ Id: dynamicWorkflow.GetId(),
+ CompiledWorkflow: dynamicWorkflow.GetCompiledWorkflow(),
},
}, phaseInfo.GetInfo().TaskNodeInfo.TaskNodeMetadata))
})
diff --git a/flytepropeller/pkg/controller/nodes/gate/handler.go b/flytepropeller/pkg/controller/nodes/gate/handler.go
index 00d2cb989f..c1308c8ed3 100644
--- a/flytepropeller/pkg/controller/nodes/gate/handler.go
+++ b/flytepropeller/pkg/controller/nodes/gate/handler.go
@@ -84,7 +84,7 @@ func (g *gateNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecut
request := &admin.SignalGetOrCreateRequest{
Id: &core.SignalIdentifier{
ExecutionId: nCtx.ExecutionContext().GetExecutionID().WorkflowExecutionIdentifier,
- SignalId: approveCondition.SignalId,
+ SignalId: approveCondition.GetSignalId(),
},
Type: &core.LiteralType{
Type: &core.LiteralType_Simple{
@@ -99,10 +99,10 @@ func (g *gateNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecut
}
// if signal has value then check for approval
- if signal.Value != nil && signal.Value.Value != nil {
- approved, ok := getBoolean(signal.Value)
+ if signal.GetValue() != nil && signal.Value.Value != nil {
+ approved, ok := getBoolean(signal.GetValue())
if !ok {
- errMsg := fmt.Sprintf("received a non-boolean approve signal value [%v]", signal.Value)
+ errMsg := fmt.Sprintf("received a non-boolean approve signal value [%v]", signal.GetValue())
return handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(core.ExecutionError_UNKNOWN,
errors.RuntimeExecutionError, errMsg, nil)), nil
}
@@ -143,9 +143,9 @@ func (g *gateNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecut
request := &admin.SignalGetOrCreateRequest{
Id: &core.SignalIdentifier{
ExecutionId: nCtx.ExecutionContext().GetExecutionID().WorkflowExecutionIdentifier,
- SignalId: signalCondition.SignalId,
+ SignalId: signalCondition.GetSignalId(),
},
- Type: signalCondition.Type,
+ Type: signalCondition.GetType(),
}
signal, err := g.signalClient.GetOrCreateSignal(ctx, request)
@@ -154,10 +154,10 @@ func (g *gateNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecut
}
// if signal has value then write to output and transition to success
- if signal.Value != nil && signal.Value.Value != nil {
+ if signal.GetValue() != nil && signal.Value.Value != nil {
outputs := &core.LiteralMap{
Literals: map[string]*core.Literal{
- signalCondition.OutputVariableName: signal.Value,
+ signalCondition.GetOutputVariableName(): signal.GetValue(),
},
}
@@ -218,9 +218,9 @@ func New(eventConfig *config.EventConfig, signalClient service.SignalServiceClie
}
func getBoolean(literal *core.Literal) (bool, bool) {
- if scalarValue, ok := literal.Value.(*core.Literal_Scalar); ok {
- if primitiveValue, ok := scalarValue.Scalar.Value.(*core.Scalar_Primitive); ok {
- if booleanValue, ok := primitiveValue.Primitive.Value.(*core.Primitive_Boolean); ok {
+ if scalarValue, ok := literal.GetValue().(*core.Literal_Scalar); ok {
+ if primitiveValue, ok := scalarValue.Scalar.GetValue().(*core.Scalar_Primitive); ok {
+ if booleanValue, ok := primitiveValue.Primitive.GetValue().(*core.Primitive_Boolean); ok {
return booleanValue.Boolean, true
}
}
diff --git a/flytepropeller/pkg/controller/nodes/handler/transition_info.go b/flytepropeller/pkg/controller/nodes/handler/transition_info.go
index c9af525cca..7e787a9424 100644
--- a/flytepropeller/pkg/controller/nodes/handler/transition_info.go
+++ b/flytepropeller/pkg/controller/nodes/handler/transition_info.go
@@ -173,7 +173,7 @@ func phaseInfoFailed(p EPhase, err *core.ExecutionError, info *ExecutionInfo) Ph
}
}
- return phaseInfo(p, err, info, err.Message)
+ return phaseInfo(p, err, info, err.GetMessage())
}
func PhaseInfoFailure(kind core.ExecutionError_ErrorKind, code, reason string, info *ExecutionInfo) PhaseInfo {
diff --git a/flytepropeller/pkg/controller/nodes/handler/transition_info_test.go b/flytepropeller/pkg/controller/nodes/handler/transition_info_test.go
index 883dbd5f45..91042fc588 100644
--- a/flytepropeller/pkg/controller/nodes/handler/transition_info_test.go
+++ b/flytepropeller/pkg/controller/nodes/handler/transition_info_test.go
@@ -110,8 +110,8 @@ func TestPhaseInfo(t *testing.T) {
assert.Equal(t, EPhaseFailed, p.GetPhase())
assert.Equal(t, i, p.GetInfo())
if assert.NotNil(t, p.GetErr()) {
- assert.Equal(t, "code", p.GetErr().Code)
- assert.Equal(t, "reason", p.GetErr().Message)
+ assert.Equal(t, "code", p.GetErr().GetCode())
+ assert.Equal(t, "reason", p.GetErr().GetMessage())
}
assert.NotNil(t, p.GetOccurredAt())
})
@@ -141,8 +141,8 @@ func TestPhaseInfo(t *testing.T) {
assert.Equal(t, EPhaseRetryableFailure, p.GetPhase())
assert.Equal(t, i, p.GetInfo())
if assert.NotNil(t, p.GetErr()) {
- assert.Equal(t, "code", p.GetErr().Code)
- assert.Equal(t, "reason", p.GetErr().Message)
+ assert.Equal(t, "code", p.GetErr().GetCode())
+ assert.Equal(t, "reason", p.GetErr().GetMessage())
}
assert.NotNil(t, p.GetOccurredAt())
})
diff --git a/flytepropeller/pkg/controller/nodes/node_exec_context.go b/flytepropeller/pkg/controller/nodes/node_exec_context.go
index 7de31100c6..9721d2af6c 100644
--- a/flytepropeller/pkg/controller/nodes/node_exec_context.go
+++ b/flytepropeller/pkg/controller/nodes/node_exec_context.go
@@ -39,16 +39,16 @@ func (e eventRecorder) RecordTaskEvent(ctx context.Context, ev *event.TaskExecut
if eventConfig.ErrorOnAlreadyExists {
return err
}
- logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.Phase)
+ logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.GetPhase())
return nil
} else if eventsErr.IsEventAlreadyInTerminalStateError(err) {
- if IsTerminalTaskPhase(ev.Phase) {
+ if IsTerminalTaskPhase(ev.GetPhase()) {
// Event is terminal and the stored value in flyteadmin is already terminal. This implies aborted case. So ignoring
- logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.Phase)
+ logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.GetPhase())
return nil
}
- logger.Warningf(ctx, "Failed to record taskEvent in state: %s, error: %s", ev.Phase, err)
- return errors.Wrapf(err, "failed to record task event, as it already exists in terminal state. Event state: %s", ev.Phase)
+ logger.Warningf(ctx, "Failed to record taskEvent in state: %s, error: %s", ev.GetPhase(), err)
+ return errors.Wrapf(err, "failed to record task event, as it already exists in terminal state. Event state: %s", ev.GetPhase())
}
return err
}
@@ -60,30 +60,30 @@ func (e eventRecorder) RecordNodeEvent(ctx context.Context, nodeEvent *event.Nod
return fmt.Errorf("event recording attempt of Nil Node execution event")
}
- if nodeEvent.Id == nil {
+ if nodeEvent.GetId() == nil {
return fmt.Errorf("event recording attempt of with nil node Event ID")
}
- logger.Infof(ctx, "Recording NodeEvent [%s] phase[%s]", nodeEvent.GetId().String(), nodeEvent.Phase.String())
+ logger.Infof(ctx, "Recording NodeEvent [%s] phase[%s]", nodeEvent.GetId().String(), nodeEvent.GetPhase().String())
err := e.nodeEventRecorder.RecordNodeEvent(ctx, nodeEvent, eventConfig)
if err != nil {
- if nodeEvent.GetId().NodeId == v1alpha1.EndNodeID {
+ if nodeEvent.GetId().GetNodeId() == v1alpha1.EndNodeID {
return nil
}
if eventsErr.IsAlreadyExists(err) {
logger.Infof(ctx, "Node event phase: %s, nodeId %s already exist",
- nodeEvent.Phase.String(), nodeEvent.GetId().NodeId)
+ nodeEvent.GetPhase().String(), nodeEvent.GetId().GetNodeId())
return nil
} else if eventsErr.IsEventAlreadyInTerminalStateError(err) {
- if IsTerminalNodePhase(nodeEvent.Phase) {
+ if IsTerminalNodePhase(nodeEvent.GetPhase()) {
// Event was trying to record a different terminal phase for an already terminal event. ignoring.
logger.Infof(ctx, "Node event phase: %s, nodeId %s already in terminal phase. err: %s",
- nodeEvent.Phase.String(), nodeEvent.GetId().NodeId, err.Error())
+ nodeEvent.GetPhase().String(), nodeEvent.GetId().GetNodeId(), err.Error())
return nil
}
logger.Warningf(ctx, "Failed to record nodeEvent, error [%s]", err.Error())
- return nodeerrors.Wrapf(nodeerrors.IllegalStateError, nodeEvent.Id.NodeId, err, "phase mismatch mismatch between propeller and control plane; Trying to record Node p: %s", nodeEvent.Phase)
+ return nodeerrors.Wrapf(nodeerrors.IllegalStateError, nodeEvent.GetId().GetNodeId(), err, "phase mismatch between propeller and control plane; Trying to record Node p: %s", nodeEvent.GetPhase())
}
}
return err
@@ -223,7 +223,7 @@ func newNodeExecContext(_ context.Context, store *storage.DataStore, execContext
}
nodeLabels[NodeIDLabel] = utils.SanitizeLabelValue(node.GetID())
if tr != nil && tr.GetTaskID() != nil {
- nodeLabels[TaskNameLabel] = utils.SanitizeLabelValue(tr.GetTaskID().Name)
+ nodeLabels[TaskNameLabel] = utils.SanitizeLabelValue(tr.GetTaskID().GetName())
}
nodeLabels[NodeInterruptibleLabel] = strconv.FormatBool(interruptible)
md.nodeLabels = nodeLabels
@@ -290,9 +290,9 @@ func (c *nodeExecutor) BuildNodeExecutionContext(ctx context.Context, executionC
if config.GetConfig().NodeConfig.IgnoreRetryCause {
// For the unified retry behavior we execute the last interruptibleFailureThreshold attempts on a non
// interruptible machine
- maxAttempts := uint32(config.GetConfig().NodeConfig.DefaultMaxAttempts)
+ maxAttempts := uint32(config.GetConfig().NodeConfig.DefaultMaxAttempts) // #nosec G115
if n.GetRetryStrategy() != nil && n.GetRetryStrategy().MinAttempts != nil && *n.GetRetryStrategy().MinAttempts != 1 {
- maxAttempts = uint32(*n.GetRetryStrategy().MinAttempts)
+ maxAttempts = uint32(*n.GetRetryStrategy().MinAttempts) // #nosec G115
}
// For interruptible nodes run at least one attempt on an interruptible machine (thus s.GetAttempts() > 0) even if there won't be any retries
diff --git a/flytepropeller/pkg/controller/nodes/node_exec_context_test.go b/flytepropeller/pkg/controller/nodes/node_exec_context_test.go
index 4614d0f035..2f421a7a7b 100644
--- a/flytepropeller/pkg/controller/nodes/node_exec_context_test.go
+++ b/flytepropeller/pkg/controller/nodes/node_exec_context_test.go
@@ -147,10 +147,10 @@ func Test_NodeContextDefault(t *testing.T) {
// Test that retrieving task nodes
taskIdentifier := common.GetTargetEntity(ctx, nodeExecContext)
- assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.Id.Project, taskIdentifier.Project)
- assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.Id.Domain, taskIdentifier.Domain)
- assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.Id.Name, taskIdentifier.Name)
- assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.Id.Version, taskIdentifier.Version)
+ assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.GetId().GetProject(), taskIdentifier.GetProject())
+ assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.GetId().GetDomain(), taskIdentifier.GetDomain())
+ assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.GetId().GetName(), taskIdentifier.GetName())
+ assert.Equal(t, w1.Tasks["taskID"].TaskTemplate.GetId().GetVersion(), taskIdentifier.GetVersion())
}
func TestGetTargetEntity_LaunchPlanNode(t *testing.T) {
@@ -173,10 +173,10 @@ func TestGetTargetEntity_LaunchPlanNode(t *testing.T) {
nCtx.OnNode().Return(n)
fetchedID := common.GetTargetEntity(context.Background(), nCtx)
- assert.Equal(t, id.Project, fetchedID.Project)
- assert.Equal(t, id.Domain, fetchedID.Domain)
- assert.Equal(t, id.Name, fetchedID.Name)
- assert.Equal(t, id.Version, fetchedID.Version)
+ assert.Equal(t, id.GetProject(), fetchedID.GetProject())
+ assert.Equal(t, id.GetDomain(), fetchedID.GetDomain())
+ assert.Equal(t, id.GetName(), fetchedID.GetName())
+ assert.Equal(t, id.GetVersion(), fetchedID.GetVersion())
}
func TestGetTargetEntity_EmptyTask(t *testing.T) {
diff --git a/flytepropeller/pkg/controller/nodes/node_state_manager.go b/flytepropeller/pkg/controller/nodes/node_state_manager.go
index a9ead9afc3..fd74d107a0 100644
--- a/flytepropeller/pkg/controller/nodes/node_state_manager.go
+++ b/flytepropeller/pkg/controller/nodes/node_state_manager.go
@@ -80,7 +80,7 @@ func (n nodeStateManager) GetTaskNodeState() handler.TaskNodeState {
tn := n.nodeStatus.GetTaskNodeStatus()
if tn != nil {
return handler.TaskNodeState{
- PluginPhase: pluginCore.Phase(tn.GetPhase()),
+ PluginPhase: pluginCore.Phase(tn.GetPhase()), // #nosec G115
PluginPhaseVersion: tn.GetPhaseVersion(),
PluginStateVersion: tn.GetPluginStateVersion(),
PluginState: tn.GetPluginState(),
diff --git a/flytepropeller/pkg/controller/nodes/output_resolver.go b/flytepropeller/pkg/controller/nodes/output_resolver.go
index df8a6dfe19..620064d2ac 100644
--- a/flytepropeller/pkg/controller/nodes/output_resolver.go
+++ b/flytepropeller/pkg/controller/nodes/output_resolver.go
@@ -86,7 +86,7 @@ func resolveSubtaskOutput(ctx context.Context, store storage.ProtobufStore, node
"Outputs not found at [%v]", outputsFileRef)
}
- l, ok := d.Literals[varName]
+ l, ok := d.GetLiterals()[varName]
if !ok {
return nil, errors.Errorf(errors.BadSpecificationError, nodeID, "Output of array tasks is expected to be "+
"a single literal map entry named 'array' of type LiteralCollection.")
@@ -97,7 +97,7 @@ func resolveSubtaskOutput(ctx context.Context, store storage.ProtobufStore, node
"is of type [%v]. LiteralCollection is expected.", reflect.TypeOf(l.GetValue()))
}
- literals := l.GetCollection().Literals
+ literals := l.GetCollection().GetLiterals()
if idx >= len(literals) {
return nil, errors.Errorf(errors.OutputsNotFoundError, nodeID, "Failed to find [%v[%v].%v]",
nodeID, idx, varName)
@@ -120,7 +120,7 @@ func resolveSingleOutput(ctx context.Context, store storage.ProtobufStore, nodeI
"Outputs not found at [%v]", outputsFileRef)
}
- l, ok := d.Literals[varName]
+ l, ok := d.GetLiterals()[varName]
if !ok {
return nil, errors.Errorf(errors.OutputsNotFoundError, nodeID,
"Failed to find [%v].[%v]", nodeID, varName)
diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go
index ea21ce1171..f819a47f0b 100644
--- a/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go
+++ b/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go
@@ -174,11 +174,11 @@ func TestWorkflowNodeHandler_StartNode_Launchplan(t *testing.T) {
mockLPExec.OnLaunchMatch(
ctx,
mock.MatchedBy(func(o launchplan.LaunchContext) bool {
- return o.ParentNodeExecution.NodeId == mockNode.GetID() &&
- o.ParentNodeExecution.ExecutionId == wfExecID
+ return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() &&
+ o.ParentNodeExecution.GetExecutionId() == wfExecID
}),
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }),
mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }),
@@ -200,11 +200,11 @@ func TestWorkflowNodeHandler_StartNode_Launchplan(t *testing.T) {
mockLPExec.OnLaunchMatch(
ctx,
mock.MatchedBy(func(o launchplan.LaunchContext) bool {
- return o.ParentNodeExecution.NodeId == mockNode.GetID() &&
- o.ParentNodeExecution.ExecutionId == wfExecID
+ return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() &&
+ o.ParentNodeExecution.GetExecutionId() == wfExecID
}),
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }),
mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }),
@@ -256,7 +256,7 @@ func TestWorkflowNodeHandler_CheckNodeStatus(t *testing.T) {
mockLPExec.OnGetStatusMatch(
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_RUNNING,
@@ -277,7 +277,7 @@ func TestWorkflowNodeHandler_CheckNodeStatus(t *testing.T) {
mockLPExec.OnGetStatusMatch(
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_RUNNING,
@@ -329,7 +329,7 @@ func TestWorkflowNodeHandler_AbortNode(t *testing.T) {
mockLPExec.OnKillMatch(
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.AnythingOfType(reflect.String.String()),
).Return(nil)
@@ -351,7 +351,7 @@ func TestWorkflowNodeHandler_AbortNode(t *testing.T) {
mockLPExec.OnKillMatch(
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.AnythingOfType(reflect.String.String()),
).Return(nil)
@@ -371,7 +371,7 @@ func TestWorkflowNodeHandler_AbortNode(t *testing.T) {
mockLPExec.OnKillMatch(
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.AnythingOfType(reflect.String.String()),
).Return(expectedErr)
diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go
index 60802a6486..16d0134740 100644
--- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go
+++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go
@@ -28,17 +28,17 @@ type launchPlanHandler struct {
func getParentNodeExecutionID(nCtx interfaces.NodeExecutionContext) (*core.NodeExecutionIdentifier, error) {
nodeExecID := &core.NodeExecutionIdentifier{
- ExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().ExecutionId,
+ ExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetExecutionId(),
}
if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 {
var err error
- currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId)
+ currentNodeUniqueID, err := common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId())
if err != nil {
return nil, err
}
nodeExecID.NodeId = currentNodeUniqueID
} else {
- nodeExecID.NodeId = nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId
+ nodeExecID.NodeId = nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId()
}
return nodeExecID, nil
}
@@ -77,11 +77,11 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces
}
if nCtx.ExecutionContext().GetExecutionConfig().RecoveryExecution.WorkflowExecutionIdentifier != nil {
- fullyQualifiedNodeID := nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId
+ fullyQualifiedNodeID := nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId()
if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 {
// compute fully qualified node id (prefixed with parent id and retry attempt) to ensure uniqueness
var err error
- fullyQualifiedNodeID, err = common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().NodeId)
+ fullyQualifiedNodeID, err = common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeExecutionMetadata().GetNodeExecutionID().GetNodeId())
if err != nil {
return handler.UnknownTransition, err
}
@@ -94,11 +94,11 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces
logger.Warnf(ctx, "Failed to recover workflow node [%+v] with err [%+v]", nCtx.NodeExecutionMetadata().GetNodeExecutionID(), err)
}
}
- if recovered != nil && recovered.Closure != nil && recovered.Closure.Phase == core.NodeExecution_SUCCEEDED {
- if recovered.Closure.GetWorkflowNodeMetadata() != nil {
- launchCtx.RecoveryExecution = recovered.Closure.GetWorkflowNodeMetadata().ExecutionId
+ if recovered != nil && recovered.GetClosure() != nil && recovered.GetClosure().GetPhase() == core.NodeExecution_SUCCEEDED {
+ if recovered.GetClosure().GetWorkflowNodeMetadata() != nil {
+ launchCtx.RecoveryExecution = recovered.GetClosure().GetWorkflowNodeMetadata().GetExecutionId()
} else {
- logger.Debugf(ctx, "Attempted to recovered workflow node execution [%+v] but was missing workflow node metadata", recovered.Id)
+ logger.Debugf(ctx, "Attempted to recover workflow node execution [%+v] but was missing workflow node metadata", recovered.GetId())
}
}
}
@@ -106,7 +106,7 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces
nodeInputs, nCtx.NodeExecutionMetadata().GetOwnerID().String())
if err != nil {
if launchplan.IsAlreadyExists(err) {
- logger.Infof(ctx, "Execution already exists [%s].", childID.Name)
+ logger.Infof(ctx, "Execution already exists [%s].", childID.GetName())
} else if launchplan.IsUserError(err) {
return handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(core.ExecutionError_USER, errors.RuntimeExecutionError, err.Error(), nil)), nil
} else {
@@ -114,7 +114,7 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces
}
} else {
eCtx := nCtx.ExecutionContext()
- logger.Infof(ctx, "Launched launchplan with ID [%s], Parallelism is now set to [%d]", childID.Name, eCtx.IncrementParallelism())
+ logger.Infof(ctx, "Launched launchplan with ID [%s], Parallelism is now set to [%d]", childID.GetName(), eCtx.IncrementParallelism())
}
return handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{
@@ -180,7 +180,7 @@ func (l *launchPlanHandler) CheckLaunchPlanStatus(ctx context.Context, nCtx inte
switch wfStatusClosure.GetPhase() {
case core.WorkflowExecution_ABORTED:
wErr = fmt.Errorf("launchplan execution aborted")
- err = errors.Wrapf(errors.RemoteChildWorkflowExecutionFailed, nCtx.NodeID(), wErr, "launchplan [%s] aborted", childID.Name)
+ err = errors.Wrapf(errors.RemoteChildWorkflowExecutionFailed, nCtx.NodeID(), wErr, "launchplan [%s] aborted", childID.GetName())
return handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(core.ExecutionError_USER, errors.RemoteChildWorkflowExecutionFailed, err.Error(), &handler.ExecutionInfo{
WorkflowNodeInfo: &handler.WorkflowNodeInfo{LaunchedWorkflowID: childID},
})), nil
diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go
index 1ce0568bf6..91709b411d 100644
--- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go
+++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go
@@ -51,7 +51,7 @@ func (e executionCacheItem) IsTerminal() bool {
if e.ExecutionClosure == nil {
return false
}
- return e.ExecutionClosure.Phase == core.WorkflowExecution_ABORTED || e.ExecutionClosure.Phase == core.WorkflowExecution_FAILED || e.ExecutionClosure.Phase == core.WorkflowExecution_SUCCEEDED
+ return e.ExecutionClosure.GetPhase() == core.WorkflowExecution_ABORTED || e.ExecutionClosure.GetPhase() == core.WorkflowExecution_FAILED || e.ExecutionClosure.GetPhase() == core.WorkflowExecution_SUCCEEDED
}
func (e executionCacheItem) ID() string {
@@ -63,7 +63,7 @@ func (a *adminLaunchPlanExecutor) handleLaunchError(ctx context.Context, isRecov
statusCode := status.Code(err)
if isRecovery && statusCode == codes.NotFound {
- logger.Warnf(ctx, "failed to recover workflow [%s] with err %+v. will attempt to launch instead", launchPlanRef.Name, err)
+ logger.Warnf(ctx, "failed to recover workflow [%s] with err %+v. will attempt to launch instead", launchPlanRef.GetName(), err)
return nil
}
switch statusCode {
@@ -73,9 +73,9 @@ func (a *adminLaunchPlanExecutor) handleLaunchError(ctx context.Context, isRecov
logger.Errorf(ctx, "Failed to add ExecID [%v] to auto refresh cache", executionID)
}
- return stdErr.Wrapf(RemoteErrorAlreadyExists, err, "ExecID %s already exists", executionID.Name)
+ return stdErr.Wrapf(RemoteErrorAlreadyExists, err, "ExecID %s already exists", executionID.GetName())
case codes.DataLoss, codes.DeadlineExceeded, codes.Internal, codes.Unknown, codes.Canceled:
- return stdErr.Wrapf(RemoteErrorSystem, err, "failed to launch workflow [%s], system error", launchPlanRef.Name)
+ return stdErr.Wrapf(RemoteErrorSystem, err, "failed to launch workflow [%s], system error", launchPlanRef.GetName())
default:
return stdErr.Wrapf(RemoteErrorUser, err, "failed to launch workflow")
}
@@ -88,7 +88,7 @@ func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchCo
if launchCtx.RecoveryExecution != nil {
_, err = a.adminClient.RecoverExecution(ctx, &admin.ExecutionRecoverRequest{
Id: launchCtx.RecoveryExecution,
- Name: executionID.Name,
+ Name: executionID.GetName(),
Metadata: &admin.ExecutionMetadata{
ParentNodeExecution: launchCtx.ParentNodeExecution,
},
@@ -128,9 +128,9 @@ func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchCo
}
req := &admin.ExecutionCreateRequest{
- Project: executionID.Project,
- Domain: executionID.Domain,
- Name: executionID.Name,
+ Project: executionID.GetProject(),
+ Domain: executionID.GetDomain(),
+ Name: executionID.GetName(),
Inputs: inputs,
Spec: &admin.ExecutionSpec{
LaunchPlan: launchPlanRef,
@@ -143,7 +143,7 @@ func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchCo
Labels: &admin.Labels{Values: labels},
Annotations: &admin.Annotations{Values: launchCtx.Annotations},
SecurityContext: &launchCtx.SecurityContext,
- MaxParallelism: int32(launchCtx.MaxParallelism),
+ MaxParallelism: int32(launchCtx.MaxParallelism), // #nosec G115
RawOutputDataConfig: launchCtx.RawOutputDataConfig,
Interruptible: interruptible,
OverwriteCache: launchCtx.OverwriteCache,
@@ -235,8 +235,8 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc
// Is workflow already terminated, then no need to fetch information, also the item can be dropped from the cache
if exec.ExecutionClosure != nil {
- if IsWorkflowTerminated(exec.ExecutionClosure.Phase) {
- logger.Debugf(ctx, "Workflow [%s] is already completed, will not fetch execution information", exec.ExecutionClosure.WorkflowId)
+ if IsWorkflowTerminated(exec.ExecutionClosure.GetPhase()) {
+ logger.Debugf(ctx, "Workflow [%s] is already completed, will not fetch execution information", exec.ExecutionClosure.GetWorkflowId())
resp = append(resp, cache.ItemSyncResponse{
ID: obj.GetID(),
Item: exec,
@@ -256,7 +256,7 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc
// TODO: Define which error codes are system errors (and return the error) vs user stdErr.
if status.Code(err) == codes.NotFound {
- err = stdErr.Wrapf(RemoteErrorNotFound, err, "execID [%s] not found on remote", exec.WorkflowExecutionIdentifier.Name)
+ err = stdErr.Wrapf(RemoteErrorNotFound, err, "execID [%s] not found on remote", exec.WorkflowExecutionIdentifier.GetName())
} else {
err = stdErr.Wrapf(RemoteErrorSystem, err, "system error")
}
@@ -315,7 +315,7 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc
ID: obj.GetID(),
Item: executionCacheItem{
WorkflowExecutionIdentifier: exec.WorkflowExecutionIdentifier,
- ExecutionClosure: res.Closure,
+ ExecutionClosure: res.GetClosure(),
ExecutionOutputs: outputs,
ParentWorkflowID: exec.ParentWorkflowID,
},
@@ -327,7 +327,7 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc
// prematurely, there is a chance the parent workflow evaluates before the cache is updated.
for _, itemSyncResponse := range resp {
exec := itemSyncResponse.Item.(executionCacheItem)
- if exec.ExecutionClosure != nil && IsWorkflowTerminated(exec.ExecutionClosure.Phase) {
+ if exec.ExecutionClosure != nil && IsWorkflowTerminated(exec.ExecutionClosure.GetPhase()) {
a.enqueueWorkflow(exec.ParentWorkflowID)
}
}
@@ -344,7 +344,8 @@ func NewAdminLaunchPlanExecutor(_ context.Context, client service.AdminServiceCl
}
rateLimiter := &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(cfg.TPS), cfg.Burst)}
- c, err := cache.NewAutoRefreshCache("admin-launcher", exec.syncItem, rateLimiter, cfg.CacheResyncDuration.Duration, cfg.Workers, cfg.MaxCacheSize, scope)
+ // #nosec G115
+ c, err := cache.NewAutoRefreshCache("admin-launcher", exec.syncItem, rateLimiter, cfg.CacheResyncDuration.Duration, uint(cfg.Workers), uint(cfg.MaxCacheSize), scope)
if err != nil {
return nil, err
}
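
A recurring pattern in this change is swapping direct field access for the protoc-generated getters, which are nil-safe: GetX() called on a nil message returns the zero value, so chains such as recovered.GetClosure().GetPhase() in launchplan.go above cannot panic when recovery found nothing. A minimal, self-contained sketch of that behavior, using hand-rolled stand-ins for the generated types (not the Flyte messages themselves):

package main

import "fmt"

// Hand-rolled stand-ins for protoc-generated messages: each getter
// checks for a nil receiver and returns the zero value, mirroring
// what protoc-gen-go emits.
type Closure struct{ Phase int32 }

func (c *Closure) GetPhase() int32 {
	if c == nil {
		return 0
	}
	return c.Phase
}

type Execution struct{ Closure *Closure }

func (e *Execution) GetClosure() *Closure {
	if e == nil {
		return nil
	}
	return e.Closure
}

func main() {
	var recovered *Execution // nil, e.g. the recovery lookup found nothing

	// Direct field access would panic here; chained getters do not.
	fmt.Println(recovered.GetClosure().GetPhase()) // prints 0
}

Where a field still has to be taken by address or compared as a struct value, the change keeps direct access and marks the site with //nolint:protogetter instead, as in cache.go and taskexec_context_test.go below.
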
diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go
index ead1312e17..cf60cc85d8 100644
--- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go
+++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go
@@ -61,7 +61,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) {
mockClient.On("CreateExecution",
ctx,
mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool {
- return o.Project == "p" && o.Domain == "d" && o.Name == "n" && o.Spec.Inputs == nil
+ return o.GetProject() == "p" && o.GetDomain() == "d" && o.GetName() == "n" && o.GetSpec().GetInputs() == nil
}),
).Return(nil, nil)
@@ -108,7 +108,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) {
mockClient.On("CreateExecution",
ctx,
mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool {
- return o.Project == "p" && o.Domain == "d" && o.Name == "n" && o.Spec.Inputs == nil
+ return o.GetProject() == "p" && o.GetDomain() == "d" && o.GetName() == "n" && o.GetSpec().GetInputs() == nil
}),
).Return(nil, nil)
@@ -170,9 +170,9 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) {
mockClient.On("CreateExecution",
ctx,
mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool {
- return o.Project == "p" && o.Domain == "d" && o.Name == "n" && o.Spec.Inputs == nil &&
- o.Spec.Metadata.Mode == admin.ExecutionMetadata_CHILD_WORKFLOW &&
- reflect.DeepEqual(o.Spec.Labels.Values, map[string]string{"foo": "bar"}) // Ensure shard-key was removed.
+ return o.GetProject() == "p" && o.GetDomain() == "d" && o.GetName() == "n" && o.GetSpec().GetInputs() == nil &&
+ o.GetSpec().GetMetadata().GetMode() == admin.ExecutionMetadata_CHILD_WORKFLOW &&
+ reflect.DeepEqual(o.GetSpec().GetLabels().GetValues(), map[string]string{"foo": "bar"}) // Ensure shard-key was removed.
}),
).Return(nil, nil)
assert.NoError(t, err)
@@ -216,8 +216,8 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) {
mockClient.On("RecoverExecution",
ctx,
mock.MatchedBy(func(o *admin.ExecutionRecoverRequest) bool {
- return o.Id.Project == "p" && o.Id.Domain == "d" && o.Id.Name == "w" && o.Name == "n" &&
- proto.Equal(o.Metadata.ParentNodeExecution, parentNodeExecution)
+ return o.GetId().GetProject() == "p" && o.GetId().GetDomain() == "d" && o.GetId().GetName() == "w" && o.GetName() == "n" &&
+ proto.Equal(o.GetMetadata().GetParentNodeExecution(), parentNodeExecution)
}),
).Return(nil, nil)
assert.NoError(t, err)
@@ -256,8 +256,8 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) {
mockClient.On("RecoverExecution",
ctx,
mock.MatchedBy(func(o *admin.ExecutionRecoverRequest) bool {
- return o.Id.Project == "p" && o.Id.Domain == "d" && o.Id.Name == "w" && o.Name == "n" &&
- proto.Equal(o.Metadata.ParentNodeExecution, parentNodeExecution)
+ return o.GetId().GetProject() == "p" && o.GetId().GetDomain() == "d" && o.GetId().GetName() == "w" && o.GetName() == "n" &&
+ proto.Equal(o.GetMetadata().GetParentNodeExecution(), parentNodeExecution)
}),
).Return(nil, recoveryErr)
@@ -266,8 +266,8 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) {
ctx,
mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool {
createCalled = true
- return o.Project == "p" && o.Domain == "d" && o.Name == "n" && o.Spec.Inputs == nil &&
- o.Spec.Metadata.Mode == admin.ExecutionMetadata_CHILD_WORKFLOW
+ return o.GetProject() == "p" && o.GetDomain() == "d" && o.GetName() == "n" && o.GetSpec().GetInputs() == nil &&
+ o.GetSpec().GetMetadata().GetMode() == admin.ExecutionMetadata_CHILD_WORKFLOW
}),
).Return(nil, nil)
@@ -367,7 +367,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) {
exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {})
mockClient.On("TerminateExecution",
ctx,
- mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && o.Cause == reason }),
+ mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.GetId() == id && o.GetCause() == reason }),
).Return(&admin.ExecutionTerminateResponse{}, nil)
assert.NoError(t, err)
err = exec.Kill(ctx, id, reason)
@@ -380,7 +380,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) {
exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {})
mockClient.On("TerminateExecution",
ctx,
- mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && o.Cause == reason }),
+ mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.GetId() == id && o.GetCause() == reason }),
).Return(nil, status.Error(codes.NotFound, ""))
assert.NoError(t, err)
err = exec.Kill(ctx, id, reason)
@@ -393,7 +393,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) {
exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {})
mockClient.On("TerminateExecution",
ctx,
- mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && o.Cause == reason }),
+ mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.GetId() == id && o.GetCause() == reason }),
).Return(nil, status.Error(codes.Canceled, ""))
assert.NoError(t, err)
err = exec.Kill(ctx, id, reason)
@@ -426,7 +426,7 @@ func TestNewAdminLaunchPlanExecutor_GetLaunchPlan(t *testing.T) {
).Return(&admin.LaunchPlan{Id: id}, nil)
lp, err := exec.GetLaunchPlan(ctx, id)
assert.NoError(t, err)
- assert.Equal(t, lp.Id, id)
+ assert.Equal(t, lp.GetId(), id)
})
t.Run("launch plan not found", func(t *testing.T) {
diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go
index 3f7444788d..4c5873cc4d 100644
--- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go
+++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go
@@ -19,12 +19,12 @@ type failFastWorkflowLauncher struct {
func (failFastWorkflowLauncher) Launch(ctx context.Context, launchCtx LaunchContext, executionID *core.WorkflowExecutionIdentifier,
launchPlanRef *core.Identifier, inputs *core.LiteralMap, parentWorkflowID v1alpha1.WorkflowID) error {
- logger.Infof(ctx, "Fail: Launch Workflow requested with ExecID [%s], LaunchPlan [%s]", executionID.Name, fmt.Sprintf("%s:%s:%s", launchPlanRef.Project, launchPlanRef.Domain, launchPlanRef.Name))
+ logger.Infof(ctx, "Fail: Launch Workflow requested with ExecID [%s], LaunchPlan [%s]", executionID.GetName(), fmt.Sprintf("%s:%s:%s", launchPlanRef.GetProject(), launchPlanRef.GetDomain(), launchPlanRef.GetName()))
return errors.Wrapf(RemoteErrorUser, fmt.Errorf("badly configured system"), "please enable admin workflow launch to use launchplans")
}
func (failFastWorkflowLauncher) GetStatus(ctx context.Context, executionID *core.WorkflowExecutionIdentifier) (*admin.ExecutionClosure, *core.LiteralMap, error) {
- logger.Infof(ctx, "NOOP: Workflow Status ExecID [%s]", executionID.Name)
+ logger.Infof(ctx, "NOOP: Workflow Status ExecID [%s]", executionID.GetName())
return nil, nil, errors.Wrapf(RemoteErrorUser, fmt.Errorf("badly configured system"), "please enable admin workflow launch to use launchplans")
}
diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go
index 68b5383b78..62445d5efa 100644
--- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go
+++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go
@@ -77,11 +77,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) {
mockLPExec.On("Launch",
ctx,
mock.MatchedBy(func(o launchplan.LaunchContext) bool {
- return o.ParentNodeExecution.NodeId == mockNode.GetID() &&
- o.ParentNodeExecution.ExecutionId == wfExecID
+ return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() &&
+ o.ParentNodeExecution.GetExecutionId() == wfExecID
}),
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }),
mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }),
@@ -107,11 +107,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) {
mockLPExec.On("Launch",
ctx,
mock.MatchedBy(func(o launchplan.LaunchContext) bool {
- return o.ParentNodeExecution.NodeId == mockNode.GetID() &&
- o.ParentNodeExecution.ExecutionId == wfExecID
+ return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() &&
+ o.ParentNodeExecution.GetExecutionId() == wfExecID
}),
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }),
mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }),
@@ -134,11 +134,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) {
mockLPExec.On("Launch",
ctx,
mock.MatchedBy(func(o launchplan.LaunchContext) bool {
- return o.ParentNodeExecution.NodeId == mockNode.GetID() &&
- o.ParentNodeExecution.ExecutionId == wfExecID
+ return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() &&
+ o.ParentNodeExecution.GetExecutionId() == wfExecID
}),
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }),
mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }),
@@ -161,11 +161,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) {
mockLPExec.On("Launch",
ctx,
mock.MatchedBy(func(o launchplan.LaunchContext) bool {
- return o.ParentNodeExecution.NodeId == mockNode.GetID() &&
- o.ParentNodeExecution.ExecutionId == wfExecID
+ return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() &&
+ o.ParentNodeExecution.GetExecutionId() == wfExecID
}),
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }),
mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }),
@@ -216,11 +216,11 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) {
mockLPExec.On("Launch",
ctx,
mock.MatchedBy(func(o launchplan.LaunchContext) bool {
- return o.ParentNodeExecution.NodeId == mockNode.GetID() &&
- o.ParentNodeExecution.ExecutionId == wfExecID
+ return o.ParentNodeExecution.GetNodeId() == mockNode.GetID() &&
+ o.ParentNodeExecution.GetExecutionId() == wfExecID
}),
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }),
mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }),
@@ -317,7 +317,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_RUNNING,
@@ -340,7 +340,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_SUCCEEDED,
@@ -375,7 +375,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_SUCCEEDED,
@@ -417,7 +417,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_SUCCEEDED,
@@ -454,7 +454,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_FAILED,
@@ -484,7 +484,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_FAILED,
@@ -508,7 +508,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_ABORTED,
@@ -532,7 +532,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(nil, &core.LiteralMap{}, errors.Wrapf(launchplan.RemoteErrorNotFound, fmt.Errorf("some error"), "not found"))
@@ -554,7 +554,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(nil, &core.LiteralMap{}, errors.Wrapf(launchplan.RemoteErrorSystem, fmt.Errorf("some error"), "not found"))
@@ -582,7 +582,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_SUCCEEDED,
@@ -616,7 +616,7 @@ func TestSubWorkflowHandler_CheckLaunchPlanStatus(t *testing.T) {
mockLPExec.On("GetStatus",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
).Return(&admin.ExecutionClosure{
Phase: core.WorkflowExecution_SUCCEEDED,
@@ -670,7 +670,7 @@ func TestLaunchPlanHandler_HandleAbort(t *testing.T) {
mockLPExec.On("Kill",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.AnythingOfType(reflect.String.String()),
).Return(nil)
@@ -692,7 +692,7 @@ func TestLaunchPlanHandler_HandleAbort(t *testing.T) {
mockLPExec.On("Kill",
ctx,
mock.MatchedBy(func(o *core.WorkflowExecutionIdentifier) bool {
- return assert.Equal(t, wfExecID.Project, o.Project) && assert.Equal(t, wfExecID.Domain, o.Domain)
+ return assert.Equal(t, wfExecID.GetProject(), o.GetProject()) && assert.Equal(t, wfExecID.GetDomain(), o.GetDomain())
}),
mock.AnythingOfType(reflect.String.String()),
).Return(expectedErr)
diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/util.go b/flytepropeller/pkg/controller/nodes/subworkflow/util.go
index ae23439c97..8c119a2175 100644
--- a/flytepropeller/pkg/controller/nodes/subworkflow/util.go
+++ b/flytepropeller/pkg/controller/nodes/subworkflow/util.go
@@ -10,21 +10,21 @@ import (
const maxLengthForSubWorkflow = 20
func GetChildWorkflowExecutionID(nodeExecID *core.NodeExecutionIdentifier, attempt uint32) (*core.WorkflowExecutionIdentifier, error) {
- name, err := encoding.FixedLengthUniqueIDForParts(maxLengthForSubWorkflow, []string{nodeExecID.ExecutionId.Name, nodeExecID.NodeId, strconv.Itoa(int(attempt))})
+ name, err := encoding.FixedLengthUniqueIDForParts(maxLengthForSubWorkflow, []string{nodeExecID.GetExecutionId().GetName(), nodeExecID.GetNodeId(), strconv.Itoa(int(attempt))})
if err != nil {
return nil, err
}
// Restriction on name is 20 chars
return &core.WorkflowExecutionIdentifier{
- Project: nodeExecID.ExecutionId.Project,
- Domain: nodeExecID.ExecutionId.Domain,
+ Project: nodeExecID.GetExecutionId().GetProject(),
+ Domain: nodeExecID.GetExecutionId().GetDomain(),
Name: name,
}, nil
}
func GetChildWorkflowExecutionIDV2(nodeExecID *core.NodeExecutionIdentifier, attempt uint32) (*core.WorkflowExecutionIdentifier, error) {
- name, err := encoding.FixedLengthUniqueIDForParts(maxLengthForSubWorkflow, []string{nodeExecID.ExecutionId.Name, nodeExecID.NodeId, strconv.Itoa(int(attempt))},
+ name, err := encoding.FixedLengthUniqueIDForParts(maxLengthForSubWorkflow, []string{nodeExecID.GetExecutionId().GetName(), nodeExecID.GetNodeId(), strconv.Itoa(int(attempt))},
encoding.NewAlgorithmOption(encoding.Algorithm64))
if err != nil {
return nil, err
@@ -32,8 +32,8 @@ func GetChildWorkflowExecutionIDV2(nodeExecID *core.NodeExecutionIdentifier, att
// Restriction on name is 20 chars
return &core.WorkflowExecutionIdentifier{
- Project: nodeExecID.ExecutionId.Project,
- Domain: nodeExecID.ExecutionId.Domain,
+ Project: nodeExecID.GetExecutionId().GetProject(),
+ Domain: nodeExecID.GetExecutionId().GetDomain(),
Name: name,
}, nil
}
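
Both helpers above delegate the actual name construction to encoding.FixedLengthUniqueIDForParts, which folds the parent execution name, node ID, and retry attempt into an identifier that respects the 20-character limit. A generic sketch of that idea follows; it is only an illustration of deterministic fixed-length IDs, not Flyte's actual encoding, and the helper name is hypothetical:

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strings"
)

// fixedLengthID derives a deterministic, lowercase identifier of at
// most maxLen characters from the given parts.
func fixedLengthID(maxLen int, parts ...string) string {
	sum := sha256.Sum256([]byte(strings.Join(parts, "-")))
	id := strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(sum[:]))
	if len(id) > maxLen {
		id = id[:maxLen]
	}
	return id
}

func main() {
	// The same parent execution, node, and attempt always yield the same name.
	fmt.Println(fixedLengthID(20, "parent-exec", "n1", "1"))
}
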
diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/util_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/util_test.go
index f1df02deb6..96b93b8f8a 100644
--- a/flytepropeller/pkg/controller/nodes/subworkflow/util_test.go
+++ b/flytepropeller/pkg/controller/nodes/subworkflow/util_test.go
@@ -19,6 +19,6 @@ func TestGetChildWorkflowExecutionID(t *testing.T) {
},
},
1)
- assert.Equal(t, id.Name, "fav2uxxi")
+ assert.Equal(t, id.GetName(), "fav2uxxi")
assert.NoError(t, err)
}
diff --git a/flytepropeller/pkg/controller/nodes/task/cache.go b/flytepropeller/pkg/controller/nodes/task/cache.go
index fab3cd4d61..d408a5af85 100644
--- a/flytepropeller/pkg/controller/nodes/task/cache.go
+++ b/flytepropeller/pkg/controller/nodes/task/cache.go
@@ -26,10 +26,10 @@ func (t *Handler) GetCatalogKey(ctx context.Context, nCtx interfaces.NodeExecuti
}
return catalog.Key{
- Identifier: *taskTemplate.Id,
- CacheVersion: taskTemplate.Metadata.DiscoveryVersion,
- CacheIgnoreInputVars: taskTemplate.Metadata.CacheIgnoreInputVars,
- TypedInterface: *taskTemplate.Interface,
+ Identifier: *taskTemplate.Id, //nolint:protogetter
+ CacheVersion: taskTemplate.GetMetadata().GetDiscoveryVersion(),
+ CacheIgnoreInputVars: taskTemplate.GetMetadata().GetCacheIgnoreInputVars(),
+ TypedInterface: *taskTemplate.GetInterface(),
InputReader: nCtx.InputReader(),
}, nil
}
@@ -62,5 +62,5 @@ func (t *Handler) IsCacheable(ctx context.Context, nCtx interfaces.NodeExecution
return false, false, err
}
- return taskTemplate.Metadata.Discoverable, taskTemplate.Metadata.Discoverable && taskTemplate.Metadata.CacheSerializable, nil
+ return taskTemplate.GetMetadata().GetDiscoverable(), taskTemplate.GetMetadata().GetDiscoverable() && taskTemplate.GetMetadata().GetCacheSerializable(), nil
}
diff --git a/flytepropeller/pkg/controller/nodes/task/handler.go b/flytepropeller/pkg/controller/nodes/task/handler.go
index 2adea27312..000d6bd7e7 100644
--- a/flytepropeller/pkg/controller/nodes/task/handler.go
+++ b/flytepropeller/pkg/controller/nodes/task/handler.go
@@ -434,6 +434,7 @@ func (t Handler) invokePlugin(ctx context.Context, p pluginCore.Plugin, tCtx *ta
pluginTrns.TransitionPreviouslyRecorded()
return pluginTrns, nil
}
+ // #nosec G115
if pluginTrns.pInfo.Version() > uint32(t.cfg.MaxPluginPhaseVersions) {
logger.Errorf(ctx, "Too many Plugin p versions for plugin [%s]. p versions [%d/%d]", p.GetID(), pluginTrns.pInfo.Version(), t.cfg.MaxPluginPhaseVersions)
pluginTrns.ObservedExecutionError(&io.ExecutionError{
@@ -565,7 +566,7 @@ func (t Handler) Handle(ctx context.Context, nCtx interfaces.NodeExecutionContex
logger.Errorf(ctx, "failed to read TaskTemplate, error :%s", err.Error())
return handler.UnknownTransition, err
}
- if tk.Interface != nil && tk.Interface.Inputs != nil && len(tk.Interface.Inputs.Variables) > 0 {
+ if tk.GetInterface() != nil && tk.GetInterface().GetInputs() != nil && len(tk.GetInterface().GetInputs().GetVariables()) > 0 {
inputs, err = nCtx.InputReader().Get(ctx)
if err != nil {
logger.Errorf(ctx, "failed to read inputs when checking catalog cache %w", err)
@@ -577,7 +578,7 @@ func (t Handler) Handle(ctx context.Context, nCtx interfaces.NodeExecutionContex
occurredAt := time.Now()
// STEP 2: If no cache-hit and not transitioning to PhaseWaitingForCache, then lets invoke the plugin and wait for a transition out of undefined
if pluginTrns.execInfo.TaskNodeInfo == nil || (pluginTrns.pInfo.Phase() != pluginCore.PhaseWaitingForCache &&
- pluginTrns.execInfo.TaskNodeInfo.TaskNodeMetadata.CacheStatus != core.CatalogCacheStatus_CACHE_HIT) {
+ pluginTrns.execInfo.TaskNodeInfo.TaskNodeMetadata.GetCacheStatus() != core.CatalogCacheStatus_CACHE_HIT) {
var err error
pluginTrns, err = t.invokePlugin(ctx, p, tCtx, ts)
@@ -624,7 +625,7 @@ func (t Handler) Handle(ctx context.Context, nCtx interfaces.NodeExecutionContex
return handler.UnknownTransition, err
}
if err := nCtx.EventsRecorder().RecordTaskEvent(ctx, evInfo, t.eventConfig); err != nil {
- logger.Errorf(ctx, "Event recording failed for Plugin [%s], eventPhase [%s], error :%s", p.GetID(), evInfo.Phase.String(), err.Error())
+ logger.Errorf(ctx, "Event recording failed for Plugin [%s], eventPhase [%s], error :%s", p.GetID(), evInfo.GetPhase().String(), err.Error())
// Check for idempotency
// Check for terminate state error
return handler.UnknownTransition, err
@@ -694,8 +695,8 @@ func (t *Handler) ValidateOutput(ctx context.Context, nodeID v1alpha1.NodeID, i
return nil, err
}
- iface := tk.Interface
- outputsDeclared := iface != nil && iface.Outputs != nil && len(iface.Outputs.Variables) > 0
+ iface := tk.GetInterface()
+ outputsDeclared := iface != nil && iface.GetOutputs() != nil && len(iface.GetOutputs().GetVariables()) > 0
if r == nil {
if outputsDeclared {
@@ -838,7 +839,7 @@ func (t Handler) Abort(ctx context.Context, nCtx interfaces.NodeExecutionContext
evInfo.Phase = core.TaskExecution_ABORTED
}
if err := evRecorder.RecordTaskEvent(ctx, evInfo, t.eventConfig); err != nil {
- logger.Errorf(ctx, "Event recording failed for Plugin [%s], eventPhase [%s], error :%s", p.GetID(), evInfo.Phase.String(), err.Error())
+ logger.Errorf(ctx, "Event recording failed for Plugin [%s], eventPhase [%s], error :%s", p.GetID(), evInfo.GetPhase().String(), err.Error())
// Check for idempotency
// Check for terminate state error
return err
diff --git a/flytepropeller/pkg/controller/nodes/task/handler_test.go b/flytepropeller/pkg/controller/nodes/task/handler_test.go
index 52b937cb90..62e64c02f3 100644
--- a/flytepropeller/pkg/controller/nodes/task/handler_test.go
+++ b/flytepropeller/pkg/controller/nodes/task/handler_test.go
@@ -711,6 +711,7 @@ func Test_task_Handle_NoCatalog(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
state := &taskNodeStateHolder{}
ev := &fakeBufferedEventRecorder{}
+ // #nosec G115
nCtx := createNodeContext(tt.args.startingPluginPhase, uint32(tt.args.startingPluginPhaseVersion), tt.args.expectedState, ev, "test", state, tt.want.incrParallel)
c := &pluginCatalogMocks.Client{}
tk := Handler{
@@ -735,11 +736,11 @@ func Test_task_Handle_NoCatalog(t *testing.T) {
if tt.want.event {
if assert.Equal(t, 1, len(ev.evs)) {
e := ev.evs[0]
- assert.Equal(t, tt.want.eventPhase.String(), e.Phase.String())
+ assert.Equal(t, tt.want.eventPhase.String(), e.GetPhase().String())
if tt.args.expectedState.TaskInfo != nil {
- assert.Equal(t, tt.args.expectedState.TaskInfo.Logs, e.Logs)
+ assert.Equal(t, tt.args.expectedState.TaskInfo.Logs, e.GetLogs())
}
- if e.Phase == core.TaskExecution_RUNNING || e.Phase == core.TaskExecution_SUCCEEDED {
+ if e.GetPhase() == core.TaskExecution_RUNNING || e.GetPhase() == core.TaskExecution_SUCCEEDED {
assert.True(t, proto.Equal(inputs, e.GetInputData()))
}
}
@@ -761,11 +762,11 @@ func Test_task_Handle_NoCatalog(t *testing.T) {
assert.Equal(t, tt.args.expectedState.PhaseVersion, state.s.PluginPhaseVersion)
if tt.want.checkpoint {
assert.Equal(t, "s3://sandbox/x/name-n1-1/_flytecheckpoints",
- got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata.CheckpointUri)
+ got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata.GetCheckpointUri())
} else {
assert.True(t, got.Info().GetInfo() == nil || got.Info().GetInfo().TaskNodeInfo == nil ||
got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata == nil ||
- len(got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata.CheckpointUri) == 0)
+ len(got.Info().GetInfo().TaskNodeInfo.TaskNodeMetadata.GetCheckpointUri()) == 0)
}
}
})
diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/task_exec_context.go b/flytepropeller/pkg/controller/nodes/task/k8s/task_exec_context.go
index bb987acbc2..3b5cd3a147 100644
--- a/flytepropeller/pkg/controller/nodes/task/k8s/task_exec_context.go
+++ b/flytepropeller/pkg/controller/nodes/task/k8s/task_exec_context.go
@@ -50,8 +50,8 @@ func newTaskExecutionMetadata(tCtx pluginsCore.TaskExecutionMetadata, taskTmpl *
var err error
secretsMap := make(map[string]string)
injectLabels := make(map[string]string)
- if taskTmpl.SecurityContext != nil && len(taskTmpl.SecurityContext.Secrets) > 0 {
- secretsMap, err = secrets.MarshalSecretsToMapStrings(taskTmpl.SecurityContext.Secrets)
+ if taskTmpl.GetSecurityContext() != nil && len(taskTmpl.GetSecurityContext().GetSecrets()) > 0 {
+ secretsMap, err = secrets.MarshalSecretsToMapStrings(taskTmpl.GetSecurityContext().GetSecrets())
if err != nil {
return TaskExecutionMetadata{}, err
}
@@ -59,7 +59,7 @@ func newTaskExecutionMetadata(tCtx pluginsCore.TaskExecutionMetadata, taskTmpl *
injectLabels[secrets.PodLabel] = secrets.PodLabelValue
}
- id := tCtx.GetSecurityContext().RunAs.ExecutionIdentity
+ id := tCtx.GetSecurityContext().RunAs.GetExecutionIdentity() //nolint:protogetter
if len(id) > 0 {
sanitizedID := k8sUtils.SanitizeLabelValue(id)
injectLabels[executionIdentityVariable] = sanitizedID
diff --git a/flytepropeller/pkg/controller/nodes/task/plugin_state_manager.go b/flytepropeller/pkg/controller/nodes/task/plugin_state_manager.go
index 3613fec916..52edbb3d70 100644
--- a/flytepropeller/pkg/controller/nodes/task/plugin_state_manager.go
+++ b/flytepropeller/pkg/controller/nodes/task/plugin_state_manager.go
@@ -75,7 +75,7 @@ func newPluginStateManager(_ context.Context, prevCodecVersion CodecVersion, pre
return &pluginStateManager{
codec: codex.GobStateCodec{},
codecVersion: GobCodecVersion,
- prevStateVersion: uint8(prevStateVersion),
+ prevStateVersion: uint8(prevStateVersion), // #nosec G115
prevState: prevState,
}, nil
}
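
The // #nosec G115 annotations added in this change (here and in admin.go, handler.go, handler_test.go, and taskexec_context.go) suppress gosec's integer-conversion-overflow rule at sites where the configured value is known to fit the narrower type. Where that invariant is less certain, an explicit range check is the usual alternative to silencing the finding; a rough sketch with a hypothetical helper, not part of this change:

package main

import (
	"fmt"
	"math"
)

// safeUint8 narrows an int to uint8, failing loudly instead of
// silently wrapping around when the value is out of range.
func safeUint8(v int) (uint8, error) {
	if v < 0 || v > math.MaxUint8 {
		return 0, fmt.Errorf("value %d does not fit in uint8", v)
	}
	return uint8(v), nil
}

func main() {
	for _, v := range []int{1, 300} {
		got, err := safeUint8(v)
		fmt.Println(got, err)
	}
}
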
diff --git a/flytepropeller/pkg/controller/nodes/task/secretmanager/secrets.go b/flytepropeller/pkg/controller/nodes/task/secretmanager/secrets.go
index 230017d7d3..7ffbfff9f2 100644
--- a/flytepropeller/pkg/controller/nodes/task/secretmanager/secrets.go
+++ b/flytepropeller/pkg/controller/nodes/task/secretmanager/secrets.go
@@ -50,18 +50,18 @@ func (f FileEnvSecretManager) Get(ctx context.Context, key string) (string, erro
// Prefix+SecretGroup+_+SecretKey. If the secret is not found in environment, it'll lookup the secret from files using
// the configured SecretPath / SecretGroup / SecretKey.
func (f FileEnvSecretManager) GetForSecret(ctx context.Context, secret *coreIdl.Secret) (string, error) {
- if len(secret.Group) == 0 || len(secret.Key) == 0 {
+ if len(secret.GetGroup()) == 0 || len(secret.GetKey()) == 0 {
return "", fmt.Errorf("both key and group are required parameters. Secret: [%v]", secret.String())
}
- envVar := fmt.Sprintf(envVarLookupFormatter, f.envPrefix, strings.ToUpper(secret.Group), strings.ToUpper(secret.Key))
+ envVar := fmt.Sprintf(envVarLookupFormatter, f.envPrefix, strings.ToUpper(secret.GetGroup()), strings.ToUpper(secret.GetKey()))
v, ok := os.LookupEnv(envVar)
if ok {
logger.Debugf(ctx, "Secret found %s", v)
return v, nil
}
- secretFile := filepath.Join(f.secretPath, filepath.Join(secret.Group, secret.Key))
+ secretFile := filepath.Join(f.secretPath, filepath.Join(secret.GetGroup(), secret.GetKey()))
if _, err := os.Stat(secretFile); err != nil {
if os.IsNotExist(err) {
return "", fmt.Errorf("secrets not found - Env [%s], file [%s]", envVar, secretFile)
diff --git a/flytepropeller/pkg/controller/nodes/task/taskexec_context.go b/flytepropeller/pkg/controller/nodes/task/taskexec_context.go
index 25b936a8e4..1f29060ca9 100644
--- a/flytepropeller/pkg/controller/nodes/task/taskexec_context.go
+++ b/flytepropeller/pkg/controller/nodes/task/taskexec_context.go
@@ -258,12 +258,12 @@ func (t *Handler) newTaskExecutionContext(ctx context.Context, nCtx interfaces.N
length = *l
}
- rawOutputPrefix, uniqueID, err := ComputeRawOutputPrefix(ctx, length, nCtx, currentNodeUniqueID, id.RetryAttempt)
+ rawOutputPrefix, uniqueID, err := ComputeRawOutputPrefix(ctx, length, nCtx, currentNodeUniqueID, id.GetRetryAttempt())
if err != nil {
return nil, err
}
- prevCheckpointPath, err := ComputePreviousCheckpointPath(ctx, length, nCtx, currentNodeUniqueID, id.RetryAttempt)
+ prevCheckpointPath, err := ComputePreviousCheckpointPath(ctx, length, nCtx, currentNodeUniqueID, id.GetRetryAttempt())
if err != nil {
return nil, err
}
@@ -280,9 +280,9 @@ func (t *Handler) newTaskExecutionContext(ctx context.Context, nCtx interfaces.N
}
resourceNamespacePrefix := pluginCore.ResourceNamespace(t.resourceManager.GetID()).CreateSubNamespace(pluginCore.ResourceNamespace(plugin.GetID()))
- maxAttempts := uint32(controllerconfig.GetConfig().NodeConfig.DefaultMaxAttempts)
+ maxAttempts := uint32(controllerconfig.GetConfig().NodeConfig.DefaultMaxAttempts) // #nosec G115
if nCtx.Node().GetRetryStrategy() != nil && nCtx.Node().GetRetryStrategy().MinAttempts != nil {
- maxAttempts = uint32(*nCtx.Node().GetRetryStrategy().MinAttempts)
+ maxAttempts = uint32(*nCtx.Node().GetRetryStrategy().MinAttempts) // #nosec G115
}
taskTemplatePath, err := ioutils.GetTaskTemplatePath(ctx, nCtx.DataStore(), nCtx.NodeStatus().GetDataDir())
diff --git a/flytepropeller/pkg/controller/nodes/task/taskexec_context_test.go b/flytepropeller/pkg/controller/nodes/task/taskexec_context_test.go
index e798f82a04..9a469fd25c 100644
--- a/flytepropeller/pkg/controller/nodes/task/taskexec_context_test.go
+++ b/flytepropeller/pkg/controller/nodes/task/taskexec_context_test.go
@@ -167,10 +167,10 @@ func TestHandler_newTaskExecutionContext(t *testing.T) {
assert.Equal(t, got.TaskExecutionMetadata().GetOverrides().GetResources(), resources)
assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(), "name-n1-1")
- assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().TaskId, taskID)
- assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().RetryAttempt, uint32(1))
- assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().NodeExecutionId.GetNodeId(), nodeID)
- assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().NodeExecutionId.GetExecutionId(), wfExecID)
+ assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().TaskId, taskID) //nolint:protogetter
+ assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().RetryAttempt, uint32(1)) //nolint:protogetter
+ assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().NodeExecutionId.GetNodeId(), nodeID) //nolint:protogetter
+ assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetID().NodeExecutionId.GetExecutionId(), wfExecID) //nolint:protogetter
assert.Equal(t, got.TaskExecutionMetadata().GetTaskExecutionID().GetUniqueNodeID(), nodeID)
assert.EqualValues(t, got.ResourceManager().(resourcemanager.TaskResourceManager).GetResourcePoolInfo(), make([]*event.ResourcePoolInfo, 0))
diff --git a/flytepropeller/pkg/controller/nodes/task/transformer.go b/flytepropeller/pkg/controller/nodes/task/transformer.go
index 242c1334ce..5412994732 100644
--- a/flytepropeller/pkg/controller/nodes/task/transformer.go
+++ b/flytepropeller/pkg/controller/nodes/task/transformer.go
@@ -57,16 +57,16 @@ func ToTaskEventPhase(p pluginCore.Phase) core.TaskExecution_Phase {
func getParentNodeExecIDForTask(taskExecID *core.TaskExecutionIdentifier, execContext executors.ExecutionContext) (*core.NodeExecutionIdentifier, error) {
nodeExecutionID := &core.NodeExecutionIdentifier{
- ExecutionId: taskExecID.NodeExecutionId.ExecutionId,
+ ExecutionId: taskExecID.GetNodeExecutionId().GetExecutionId(),
}
if execContext.GetEventVersion() != v1alpha1.EventVersion0 {
- currentNodeUniqueID, err := common.GenerateUniqueID(execContext.GetParentInfo(), taskExecID.NodeExecutionId.NodeId)
+ currentNodeUniqueID, err := common.GenerateUniqueID(execContext.GetParentInfo(), taskExecID.GetNodeExecutionId().GetNodeId())
if err != nil {
return nil, err
}
nodeExecutionID.NodeId = currentNodeUniqueID
} else {
- nodeExecutionID.NodeId = taskExecID.NodeExecutionId.NodeId
+ nodeExecutionID.NodeId = taskExecID.GetNodeExecutionId().GetNodeId()
}
return nodeExecutionID, nil
}
@@ -145,9 +145,9 @@ func ToTaskExecutionEvent(input ToTaskExecutionEventInputs) (*event.TaskExecutio
})
}
tev := &event.TaskExecutionEvent{
- TaskId: taskExecID.TaskId,
+ TaskId: taskExecID.GetTaskId(),
ParentNodeExecutionId: nodeExecutionID,
- RetryAttempt: taskExecID.RetryAttempt,
+ RetryAttempt: taskExecID.GetRetryAttempt(),
Phase: ToTaskEventPhase(input.Info.Phase()),
PhaseVersion: input.Info.Version(),
ProducerId: input.ClusterID,
diff --git a/flytepropeller/pkg/controller/nodes/task/transformer_test.go b/flytepropeller/pkg/controller/nodes/task/transformer_test.go
index db89dda3e6..825b58a2ab 100644
--- a/flytepropeller/pkg/controller/nodes/task/transformer_test.go
+++ b/flytepropeller/pkg/controller/nodes/task/transformer_test.go
@@ -99,21 +99,21 @@ func TestToTaskExecutionEvent(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Nil(t, tev.Logs)
- assert.Equal(t, core.TaskExecution_WAITING_FOR_RESOURCES, tev.Phase)
- assert.Equal(t, uint32(0), tev.PhaseVersion)
- assert.Equal(t, np, tev.OccurredAt)
- assert.Equal(t, tkID, tev.TaskId)
- assert.Equal(t, nodeID, tev.ParentNodeExecutionId)
+ assert.Nil(t, tev.GetLogs())
+ assert.Equal(t, core.TaskExecution_WAITING_FOR_RESOURCES, tev.GetPhase())
+ assert.Equal(t, uint32(0), tev.GetPhaseVersion())
+ assert.Equal(t, np, tev.GetOccurredAt())
+ assert.Equal(t, tkID, tev.GetTaskId())
+ assert.Equal(t, nodeID, tev.GetParentNodeExecutionId())
assert.Equal(t, inputPath, tev.GetInputUri())
- assert.Nil(t, tev.OutputResult)
- assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.Metadata.InstanceClass)
- assert.Equal(t, containerTaskType, tev.TaskType)
- assert.Equal(t, "reason", tev.Reason)
- assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier)
- assert.Equal(t, generatedName, tev.Metadata.GeneratedName)
- assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo)
- assert.Equal(t, testClusterID, tev.ProducerId)
+ assert.Nil(t, tev.GetOutputResult())
+ assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.GetMetadata().GetInstanceClass())
+ assert.Equal(t, containerTaskType, tev.GetTaskType())
+ assert.Equal(t, "reason", tev.GetReason())
+ assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier())
+ assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName())
+ assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo())
+ assert.Equal(t, testClusterID, tev.GetProducerId())
l := []*core.TaskLog{
{Uri: "x", Name: "y", MessageFormat: core.TaskLog_JSON},
@@ -139,21 +139,21 @@ func TestToTaskExecutionEvent(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Equal(t, core.TaskExecution_RUNNING, tev.Phase)
- assert.Equal(t, uint32(1), tev.PhaseVersion)
- assert.Equal(t, l, tev.Logs)
- assert.Equal(t, c, tev.CustomInfo)
- assert.Equal(t, np, tev.OccurredAt)
- assert.Equal(t, tkID, tev.TaskId)
- assert.Equal(t, nodeID, tev.ParentNodeExecutionId)
+ assert.Equal(t, core.TaskExecution_RUNNING, tev.GetPhase())
+ assert.Equal(t, uint32(1), tev.GetPhaseVersion())
+ assert.Equal(t, l, tev.GetLogs())
+ assert.Equal(t, c, tev.GetCustomInfo())
+ assert.Equal(t, np, tev.GetOccurredAt())
+ assert.Equal(t, tkID, tev.GetTaskId())
+ assert.Equal(t, nodeID, tev.GetParentNodeExecutionId())
assert.Equal(t, inputPath, tev.GetInputUri())
- assert.Nil(t, tev.OutputResult)
- assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.Metadata.InstanceClass)
- assert.Equal(t, containerTaskType, tev.TaskType)
- assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier)
- assert.Equal(t, generatedName, tev.Metadata.GeneratedName)
- assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo)
- assert.Equal(t, testClusterID, tev.ProducerId)
+ assert.Nil(t, tev.GetOutputResult())
+ assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.GetMetadata().GetInstanceClass())
+ assert.Equal(t, containerTaskType, tev.GetTaskType())
+ assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier())
+ assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName())
+ assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo())
+ assert.Equal(t, testClusterID, tev.GetProducerId())
defaultNodeExecutionMetadata := nodemocks.NodeExecutionMetadata{}
defaultNodeExecutionMetadata.OnIsInterruptible().Return(false)
@@ -177,23 +177,23 @@ func TestToTaskExecutionEvent(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Equal(t, core.TaskExecution_SUCCEEDED, tev.Phase)
- assert.Equal(t, uint32(0), tev.PhaseVersion)
- assert.Equal(t, l, tev.Logs)
- assert.Equal(t, c, tev.CustomInfo)
- assert.Equal(t, np, tev.OccurredAt)
- assert.Equal(t, np, tev.OccurredAt)
- assert.Equal(t, tkID, tev.TaskId)
- assert.Equal(t, nodeID, tev.ParentNodeExecutionId)
- assert.NotNil(t, tev.OutputResult)
+ assert.Equal(t, core.TaskExecution_SUCCEEDED, tev.GetPhase())
+ assert.Equal(t, uint32(0), tev.GetPhaseVersion())
+ assert.Equal(t, l, tev.GetLogs())
+ assert.Equal(t, c, tev.GetCustomInfo())
+ assert.Equal(t, np, tev.GetOccurredAt())
+ assert.Equal(t, np, tev.GetOccurredAt())
+ assert.Equal(t, tkID, tev.GetTaskId())
+ assert.Equal(t, nodeID, tev.GetParentNodeExecutionId())
+ assert.NotNil(t, tev.GetOutputResult())
assert.Equal(t, inputPath, tev.GetInputUri())
assert.Equal(t, outputPath, tev.GetOutputUri())
- assert.Empty(t, event.TaskExecutionMetadata_DEFAULT, tev.Metadata.InstanceClass)
- assert.Equal(t, containerTaskType, tev.TaskType)
- assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier)
- assert.Equal(t, generatedName, tev.Metadata.GeneratedName)
- assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo)
- assert.Equal(t, testClusterID, tev.ProducerId)
+ assert.Empty(t, event.TaskExecutionMetadata_DEFAULT, tev.GetMetadata().GetInstanceClass())
+ assert.Equal(t, containerTaskType, tev.GetTaskType())
+ assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier())
+ assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName())
+ assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo())
+ assert.Equal(t, testClusterID, tev.GetProducerId())
t.Run("inline event policy", func(t *testing.T) {
inputs := &core.LiteralMap{
@@ -297,21 +297,21 @@ func TestToTaskExecutionEventWithParent(t *testing.T) {
expectedNodeID := &core.NodeExecutionIdentifier{
NodeId: "fmxzd5ta",
}
- assert.Nil(t, tev.Logs)
- assert.Equal(t, core.TaskExecution_WAITING_FOR_RESOURCES, tev.Phase)
- assert.Equal(t, uint32(0), tev.PhaseVersion)
- assert.Equal(t, np, tev.OccurredAt)
- assert.Equal(t, tkID, tev.TaskId)
- assert.Equal(t, expectedNodeID, tev.ParentNodeExecutionId)
+ assert.Nil(t, tev.GetLogs())
+ assert.Equal(t, core.TaskExecution_WAITING_FOR_RESOURCES, tev.GetPhase())
+ assert.Equal(t, uint32(0), tev.GetPhaseVersion())
+ assert.Equal(t, np, tev.GetOccurredAt())
+ assert.Equal(t, tkID, tev.GetTaskId())
+ assert.Equal(t, expectedNodeID, tev.GetParentNodeExecutionId())
assert.Equal(t, inputPath, tev.GetInputUri())
- assert.Nil(t, tev.OutputResult)
- assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.Metadata.InstanceClass)
- assert.Equal(t, containerTaskType, tev.TaskType)
- assert.Equal(t, "reason", tev.Reason)
- assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier)
- assert.Equal(t, generatedName, tev.Metadata.GeneratedName)
- assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo)
- assert.Equal(t, testClusterID, tev.ProducerId)
+ assert.Nil(t, tev.GetOutputResult())
+ assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.GetMetadata().GetInstanceClass())
+ assert.Equal(t, containerTaskType, tev.GetTaskType())
+ assert.Equal(t, "reason", tev.GetReason())
+ assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier())
+ assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName())
+ assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo())
+ assert.Equal(t, testClusterID, tev.GetProducerId())
l := []*core.TaskLog{
{Uri: "x", Name: "y", MessageFormat: core.TaskLog_JSON},
@@ -337,19 +337,19 @@ func TestToTaskExecutionEventWithParent(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.Equal(t, core.TaskExecution_RUNNING, tev.Phase)
- assert.Equal(t, uint32(1), tev.PhaseVersion)
- assert.Equal(t, l, tev.Logs)
- assert.Equal(t, c, tev.CustomInfo)
- assert.Equal(t, np, tev.OccurredAt)
- assert.Equal(t, tkID, tev.TaskId)
- assert.Equal(t, expectedNodeID, tev.ParentNodeExecutionId)
+ assert.Equal(t, core.TaskExecution_RUNNING, tev.GetPhase())
+ assert.Equal(t, uint32(1), tev.GetPhaseVersion())
+ assert.Equal(t, l, tev.GetLogs())
+ assert.Equal(t, c, tev.GetCustomInfo())
+ assert.Equal(t, np, tev.GetOccurredAt())
+ assert.Equal(t, tkID, tev.GetTaskId())
+ assert.Equal(t, expectedNodeID, tev.GetParentNodeExecutionId())
assert.Equal(t, inputPath, tev.GetInputUri())
- assert.Nil(t, tev.OutputResult)
- assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.Metadata.InstanceClass)
- assert.Equal(t, containerTaskType, tev.TaskType)
- assert.Equal(t, containerPluginIdentifier, tev.Metadata.PluginIdentifier)
- assert.Equal(t, generatedName, tev.Metadata.GeneratedName)
- assert.EqualValues(t, resourcePoolInfo, tev.Metadata.ResourcePoolInfo)
- assert.Equal(t, testClusterID, tev.ProducerId)
+ assert.Nil(t, tev.GetOutputResult())
+ assert.Equal(t, event.TaskExecutionMetadata_INTERRUPTIBLE, tev.GetMetadata().GetInstanceClass())
+ assert.Equal(t, containerTaskType, tev.GetTaskType())
+ assert.Equal(t, containerPluginIdentifier, tev.GetMetadata().GetPluginIdentifier())
+ assert.Equal(t, generatedName, tev.GetMetadata().GetGeneratedName())
+ assert.EqualValues(t, resourcePoolInfo, tev.GetMetadata().GetResourcePoolInfo())
+ assert.Equal(t, testClusterID, tev.GetProducerId())
}
diff --git a/flytepropeller/pkg/controller/nodes/task_reader.go b/flytepropeller/pkg/controller/nodes/task_reader.go
index 5cc5654f63..baf8123944 100644
--- a/flytepropeller/pkg/controller/nodes/task_reader.go
+++ b/flytepropeller/pkg/controller/nodes/task_reader.go
@@ -12,7 +12,7 @@ type taskReader struct {
}
func (t taskReader) GetTaskType() v1alpha1.TaskType {
- return t.TaskTemplate.Type
+ return t.TaskTemplate.GetType()
}
func (t taskReader) GetTaskID() *core.Identifier {
diff --git a/flytepropeller/pkg/controller/nodes/transformers.go b/flytepropeller/pkg/controller/nodes/transformers.go
index a252d17344..9d19081cc4 100644
--- a/flytepropeller/pkg/controller/nodes/transformers.go
+++ b/flytepropeller/pkg/controller/nodes/transformers.go
@@ -91,7 +91,7 @@ func ToNodeExecutionEvent(
return nil, nil
}
if info.GetPhase() == handler.EPhaseUndefined {
- return nil, fmt.Errorf("illegal state, undefined phase received for node [%s]", nodeExecID.NodeId)
+ return nil, fmt.Errorf("illegal state, undefined phase received for node [%s]", nodeExecID.GetNodeId())
}
occurredTime, err := ptypes.TimestampProto(info.GetOccurredAt())
if err != nil {
@@ -115,7 +115,7 @@ func ToNodeExecutionEvent(
// Start node is special case where the Outputs are the same and hence here we copy the Output file
// into the OutputResult and in admin we copy it over into input as well.
// Start node doesn't have inputs.
- if nodeExecID.NodeId == v1alpha1.StartNodeID {
+ if nodeExecID.GetNodeId() == v1alpha1.StartNodeID {
outputsFile := v1alpha1.GetOutputsFile(status.GetOutputDir())
nev = &event.NodeExecutionEvent{
Id: nodeExecID,
@@ -162,7 +162,7 @@ func ToNodeExecutionEvent(
}
if eventVersion != v1alpha1.EventVersion0 {
- currentNodeUniqueID, err := common.GenerateUniqueID(parentInfo, nev.Id.NodeId)
+ currentNodeUniqueID, err := common.GenerateUniqueID(parentInfo, nev.GetId().GetNodeId())
if err != nil {
return nil, err
}
@@ -210,7 +210,7 @@ func ToNodeExecutionEvent(
}
} else if dynamicNodePhase != v1alpha1.DynamicNodePhaseNone {
nev.IsDynamic = true
- if nev.GetTaskNodeMetadata() != nil && nev.GetTaskNodeMetadata().DynamicWorkflow != nil {
+ if nev.GetTaskNodeMetadata() != nil && nev.GetTaskNodeMetadata().GetDynamicWorkflow() != nil {
nev.IsParent = true
}
}
diff --git a/flytepropeller/pkg/controller/nodes/transformers_test.go b/flytepropeller/pkg/controller/nodes/transformers_test.go
index 93a532a8d6..0bbc02f123 100644
--- a/flytepropeller/pkg/controller/nodes/transformers_test.go
+++ b/flytepropeller/pkg/controller/nodes/transformers_test.go
@@ -56,10 +56,10 @@ func TestToNodeExecutionEvent(t *testing.T) {
RawOutputPolicy: config.RawOutputPolicyReference,
}, nil)
assert.NoError(t, err)
- assert.True(t, nev.IsDynamic)
- assert.True(t, nev.IsParent)
- assert.Equal(t, nodeExecutionEventVersion, nev.EventVersion)
- assert.True(t, nev.IsInDynamicChain)
+ assert.True(t, nev.GetIsDynamic())
+ assert.True(t, nev.GetIsParent())
+ assert.Equal(t, nodeExecutionEventVersion, nev.GetEventVersion())
+ assert.True(t, nev.GetIsInDynamicChain())
})
t.Run("is parent", func(t *testing.T) {
info := handler.PhaseInfoDynamicRunning(&handler.ExecutionInfo{TaskNodeInfo: &handler.TaskNodeInfo{
@@ -92,9 +92,9 @@ func TestToNodeExecutionEvent(t *testing.T) {
RawOutputPolicy: config.RawOutputPolicyReference,
}, nil)
assert.NoError(t, err)
- assert.False(t, nev.IsDynamic)
- assert.True(t, nev.IsParent)
- assert.Equal(t, nodeExecutionEventVersion, nev.EventVersion)
+ assert.False(t, nev.GetIsDynamic())
+ assert.True(t, nev.GetIsParent())
+ assert.Equal(t, nodeExecutionEventVersion, nev.GetEventVersion())
})
t.Run("inline events", func(t *testing.T) {
inputs := &core.LiteralMap{
diff --git a/flytepropeller/pkg/controller/workflow/executor.go b/flytepropeller/pkg/controller/workflow/executor.go
index 1982b405cb..14a3770cff 100644
--- a/flytepropeller/pkg/controller/workflow/executor.go
+++ b/flytepropeller/pkg/controller/workflow/executor.go
@@ -295,7 +295,7 @@ func (c *workflowExecutor) IdempotentReportEvent(ctx context.Context, e *event.W
err := c.wfRecorder.RecordWorkflowEvent(ctx, e, c.eventConfig)
if err != nil && eventsErr.IsAlreadyExists(err) {
logger.Infof(ctx, "Workflow event phase: %s, executionId %s already exist",
- e.Phase.String(), e.ExecutionId)
+ e.GetPhase().String(), e.GetExecutionId())
return nil
}
return err
@@ -370,21 +370,21 @@ func (c *workflowExecutor) TransitionToPhase(ctx context.Context, execID *core.W
if recordingErr := c.IdempotentReportEvent(ctx, wfEvent); recordingErr != nil {
if eventsErr.IsAlreadyExists(recordingErr) {
- logger.Warningf(ctx, "Failed to record workflowEvent, error [%s]. Trying to record state: %s. Ignoring this error!", recordingErr.Error(), wfEvent.Phase)
+ logger.Warningf(ctx, "Failed to record workflowEvent, error [%s]. Trying to record state: %s. Ignoring this error!", recordingErr.Error(), wfEvent.GetPhase())
return nil
}
if eventsErr.IsEventAlreadyInTerminalStateError(recordingErr) {
// Move to WorkflowPhaseFailed for state mismatch
- msg := fmt.Sprintf("workflow state mismatch between propeller and control plane; Propeller State: %s, ExecutionId %s", wfEvent.Phase.String(), wfEvent.ExecutionId)
+ msg := fmt.Sprintf("workflow state mismatch between propeller and control plane; Propeller State: %s, ExecutionId %s", wfEvent.GetPhase().String(), wfEvent.GetExecutionId())
logger.Warningf(ctx, msg)
wStatus.UpdatePhase(v1alpha1.WorkflowPhaseFailed, msg, nil)
return nil
}
- if (wfEvent.Phase == core.WorkflowExecution_FAILING || wfEvent.Phase == core.WorkflowExecution_FAILED) &&
+ if (wfEvent.GetPhase() == core.WorkflowExecution_FAILING || wfEvent.GetPhase() == core.WorkflowExecution_FAILED) &&
(eventsErr.IsNotFound(recordingErr) || eventsErr.IsEventIncompatibleClusterError(recordingErr)) {
// Don't stall the workflow transition to terminated (so that resources can be cleaned up) since these events
// are being discarded by the back-end anyways.
- logger.Infof(ctx, "Failed to record %s workflowEvent, error [%s]. Ignoring this error!", wfEvent.Phase.String(), recordingErr.Error())
+ logger.Infof(ctx, "Failed to record %s workflowEvent, error [%s]. Ignoring this error!", wfEvent.GetPhase().String(), recordingErr.Error())
return nil
}
logger.Warningf(ctx, "Event recording failed. Error [%s]", recordingErr.Error())
@@ -461,7 +461,7 @@ func (c *workflowExecutor) HandleFlyteWorkflow(ctx context.Context, w *v1alpha1.
case v1alpha1.WorkflowPhaseHandlingFailureNode:
newStatus, err := c.handleFailureNode(ctx, w)
if err != nil {
- return errors.Errorf("failed to handle failure node for workflow [%s], err: [%s]", w.ID, err.Error())
+ return errors.Errorf("failed to handle failure node for workflow [%s], err: [%s]", w.ID, err.Error()) //nolint:govet,staticcheck
}
failureErr := c.TransitionToPhase(ctx, w.ExecutionID.WorkflowExecutionIdentifier, wStatus, newStatus)
// Ignore ExecutionNotFound and IncompatibleCluster errors to allow graceful failure
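Note: apart from the mechanical getter rewrites, this file gains a trailing //nolint:govet,staticcheck directive on the errors.Errorf call, which silences those two linters for that single line only (the patch does not say which specific diagnostics are being suppressed). For reference, golangci-lint's inline suppression syntax works as in the illustrative snippet below, which uses errcheck rather than the linters named above:

package main

import "fmt"

func parseCount(s string) int {
	var n int
	// A trailing //nolint:<linter>[,<linter>] comment disables only the named
	// linters, only on this line; an optional note after a second // documents why.
	fmt.Sscanf(s, "%d", &n) //nolint:errcheck // error intentionally ignored in this sketch
	return n
}

func main() {
	fmt.Println(parseCount("42"))
}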
diff --git a/flytepropeller/pkg/controller/workflow/executor_test.go b/flytepropeller/pkg/controller/workflow/executor_test.go
index 2be7238dbb..187aac7ead 100644
--- a/flytepropeller/pkg/controller/workflow/executor_test.go
+++ b/flytepropeller/pkg/controller/workflow/executor_test.go
@@ -88,14 +88,14 @@ func (f fakeRemoteWritePlugin) Handle(ctx context.Context, tCtx pluginCore.TaskE
if trns.Info().Phase() == pluginCore.PhaseSuccess {
tk, err := tCtx.TaskReader().Read(ctx)
assert.NoError(f.t, err)
- outputVars := tk.GetInterface().Outputs.Variables
+ outputVars := tk.GetInterface().GetOutputs().GetVariables()
o := &core.LiteralMap{
Literals: make(map[string]*core.Literal, len(outputVars)),
}
for k, v := range outputVars {
- l, err := coreutils.MakeDefaultLiteralForType(v.Type)
+ l, err := coreutils.MakeDefaultLiteralForType(v.GetType())
if f.enableAsserts && !assert.NoError(f.t, err) {
- assert.FailNow(f.t, "Failed to create default output for node [%v] Type [%v]", tCtx.TaskExecutionMetadata().GetTaskExecutionID(), v.Type)
+ assert.FailNow(f.t, "Failed to create default output for node [%v] Type [%v]", tCtx.TaskExecutionMetadata().GetTaskExecutionID(), v.GetType())
}
o.Literals[k] = l
}
@@ -488,21 +488,21 @@ func TestWorkflowExecutor_HandleFlyteWorkflow_Failing(t *testing.T) {
if ok {
assert.True(t, ok)
- switch e.Phase {
+ switch e.GetPhase() {
case core.WorkflowExecution_RUNNING:
- assert.WithinDuration(t, e.OccurredAt.AsTime(), time.Now(), time.Millisecond*5)
- assert.Equal(t, testClusterID, e.ProducerId)
+ assert.WithinDuration(t, e.GetOccurredAt().AsTime(), time.Now(), time.Millisecond*5)
+ assert.Equal(t, testClusterID, e.GetProducerId())
recordedRunning = true
case core.WorkflowExecution_FAILING:
- assert.WithinDuration(t, e.OccurredAt.AsTime(), time.Now(), time.Millisecond*5)
- assert.Equal(t, testClusterID, e.ProducerId)
+ assert.WithinDuration(t, e.GetOccurredAt().AsTime(), time.Now(), time.Millisecond*5)
+ assert.Equal(t, testClusterID, e.GetProducerId())
recordedFailing = true
case core.WorkflowExecution_FAILED:
- assert.WithinDuration(t, e.OccurredAt.AsTime(), time.Now(), time.Millisecond*5)
- assert.Equal(t, testClusterID, e.ProducerId)
+ assert.WithinDuration(t, e.GetOccurredAt().AsTime(), time.Now(), time.Millisecond*5)
+ assert.Equal(t, testClusterID, e.GetProducerId())
recordedFailed = true
default:
- return fmt.Errorf("MockWorkflowRecorder should not have entered into any other states [%v]", e.Phase)
+ return fmt.Errorf("MockWorkflowRecorder should not have entered into any other states [%v]", e.GetPhase())
}
}
return nil
@@ -591,30 +591,30 @@ func TestWorkflowExecutor_HandleFlyteWorkflow_Events(t *testing.T) {
eventSink.SinkCb = func(ctx context.Context, message proto.Message) error {
e, ok := message.(*event.WorkflowExecutionEvent)
if ok {
- switch e.Phase {
+ switch e.GetPhase() {
case core.WorkflowExecution_RUNNING:
- occuredAt, err := ptypes.Timestamp(e.OccurredAt)
+ occuredAt, err := ptypes.Timestamp(e.GetOccurredAt())
assert.NoError(t, err)
assert.WithinDuration(t, occuredAt, time.Now(), time.Millisecond*5)
- assert.Equal(t, testClusterID, e.ProducerId)
+ assert.Equal(t, testClusterID, e.GetProducerId())
recordedRunning = true
case core.WorkflowExecution_SUCCEEDING:
- occuredAt, err := ptypes.Timestamp(e.OccurredAt)
+ occuredAt, err := ptypes.Timestamp(e.GetOccurredAt())
assert.NoError(t, err)
assert.WithinDuration(t, occuredAt, time.Now(), time.Millisecond*5)
- assert.Equal(t, testClusterID, e.ProducerId)
+ assert.Equal(t, testClusterID, e.GetProducerId())
recordedFailing = true
case core.WorkflowExecution_SUCCEEDED:
- occuredAt, err := ptypes.Timestamp(e.OccurredAt)
+ occuredAt, err := ptypes.Timestamp(e.GetOccurredAt())
assert.NoError(t, err)
assert.WithinDuration(t, occuredAt, time.Now(), time.Millisecond*5)
- assert.Equal(t, testClusterID, e.ProducerId)
+ assert.Equal(t, testClusterID, e.GetProducerId())
recordedSuccess = true
default:
- return fmt.Errorf("MockWorkflowRecorder should not have entered into any other states, received [%v]", e.Phase.String())
+ return fmt.Errorf("MockWorkflowRecorder should not have entered into any other states, received [%v]", e.GetPhase().String())
}
}
return nil
@@ -819,7 +819,7 @@ func TestWorkflowExecutor_HandleAbortedWorkflow(t *testing.T) {
nodeExec := &nodemocks.Node{}
wfRecorder := &eventMocks.WorkflowEventRecorder{}
wfRecorder.On("RecordWorkflowEvent", mock.Anything, mock.MatchedBy(func(ev *event.WorkflowExecutionEvent) bool {
- assert.Equal(t, testClusterID, ev.ProducerId)
+ assert.Equal(t, testClusterID, ev.GetProducerId())
evs = append(evs, ev)
return true
}), mock.Anything).Return(nil)
@@ -861,7 +861,7 @@ func TestWorkflowExecutor_HandleAbortedWorkflow(t *testing.T) {
nodeExec := &nodemocks.Node{}
wfRecorder := &eventMocks.WorkflowEventRecorder{}
wfRecorder.OnRecordWorkflowEventMatch(mock.Anything, mock.MatchedBy(func(ev *event.WorkflowExecutionEvent) bool {
- assert.Equal(t, testClusterID, ev.ProducerId)
+ assert.Equal(t, testClusterID, ev.GetProducerId())
evs = append(evs, ev)
return true
}), mock.Anything).Return(nil)
@@ -902,7 +902,7 @@ func TestWorkflowExecutor_HandleAbortedWorkflow(t *testing.T) {
nodeExec := &nodemocks.Node{}
wfRecorder := &eventMocks.WorkflowEventRecorder{}
wfRecorder.OnRecordWorkflowEventMatch(mock.Anything, mock.MatchedBy(func(ev *event.WorkflowExecutionEvent) bool {
- assert.Equal(t, testClusterID, ev.ProducerId)
+ assert.Equal(t, testClusterID, ev.GetProducerId())
evs = append(evs, ev)
return true
}), mock.Anything).Return(nil)
diff --git a/flytepropeller/pkg/controller/workflowstore/execution_stats_test.go b/flytepropeller/pkg/controller/workflowstore/execution_stats_test.go
index 783e2ba688..b3c7bb44fe 100644
--- a/flytepropeller/pkg/controller/workflowstore/execution_stats_test.go
+++ b/flytepropeller/pkg/controller/workflowstore/execution_stats_test.go
@@ -89,7 +89,7 @@ func TestConcurrentAccess(t *testing.T) {
go func(id int) {
defer wg.Done()
execID := fmt.Sprintf("exec%d", id)
- err := esh.AddOrUpdateEntry(execID, SingleExecutionStats{ActiveNodeCount: uint32(id), ActiveTaskCount: uint32(id * 2)})
+ err := esh.AddOrUpdateEntry(execID, SingleExecutionStats{ActiveNodeCount: uint32(id), ActiveTaskCount: uint32(id * 2)}) // #nosec G115
assert.NoError(t, err)
}(i)
}
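Note: this is the first of many // #nosec G115 markers in the patch. gosec rule G115 flags integer conversions that may overflow, such as the int -> uint32 conversions here; where the surrounding code already bounds the value, the finding is suppressed inline. An illustrative sketch of the pattern (not Flyte code):

package main

import "fmt"

func main() {
	for id := 0; id < 3; id++ {
		// gosec G115 would flag this int -> uint32 conversion as a potential
		// overflow; the loop bound makes it safe, so the finding is silenced
		// inline, just like the AddOrUpdateEntry call in the test above.
		activeNodes := uint32(id) // #nosec G115
		fmt.Println("active nodes:", activeNodes)
	}
}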
diff --git a/flytepropeller/pkg/utils/assert/literals.go b/flytepropeller/pkg/utils/assert/literals.go
index 66f57c328e..c0fac675ed 100644
--- a/flytepropeller/pkg/utils/assert/literals.go
+++ b/flytepropeller/pkg/utils/assert/literals.go
@@ -16,14 +16,14 @@ func EqualPrimitive(t *testing.T, p1 *core.Primitive, p2 *core.Primitive) {
if p1 == nil {
return
}
- assert.Equal(t, reflect.TypeOf(p1.Value), reflect.TypeOf(p2.Value))
- switch p1.Value.(type) {
+ assert.Equal(t, reflect.TypeOf(p1.GetValue()), reflect.TypeOf(p2.GetValue()))
+ switch p1.GetValue().(type) {
case *core.Primitive_Integer:
assert.Equal(t, p1.GetInteger(), p2.GetInteger())
case *core.Primitive_StringValue:
assert.Equal(t, p1.GetStringValue(), p2.GetStringValue())
default:
- assert.FailNow(t, "Not yet implemented for types %v", reflect.TypeOf(p1.Value))
+ assert.FailNow(t, "Not yet implemented for types %v", reflect.TypeOf(p1.GetValue()))
}
}
@@ -34,12 +34,12 @@ func EqualScalar(t *testing.T, p1 *core.Scalar, p2 *core.Scalar) {
if p1 == nil {
return
}
- assert.Equal(t, reflect.TypeOf(p1.Value), reflect.TypeOf(p2.Value))
- switch p1.Value.(type) {
+ assert.Equal(t, reflect.TypeOf(p1.GetValue()), reflect.TypeOf(p2.GetValue()))
+ switch p1.GetValue().(type) {
case *core.Scalar_Primitive:
EqualPrimitive(t, p1.GetPrimitive(), p2.GetPrimitive())
default:
- assert.FailNow(t, "Not yet implemented for types %v", reflect.TypeOf(p1.Value))
+ assert.FailNow(t, "Not yet implemented for types %v", reflect.TypeOf(p1.GetValue()))
}
}
@@ -50,8 +50,8 @@ func EqualLiterals(t *testing.T, l1 *core.Literal, l2 *core.Literal) {
if l1 == nil {
return
}
- assert.Equal(t, reflect.TypeOf(l1.Value), reflect.TypeOf(l2.Value))
- switch l1.Value.(type) {
+ assert.Equal(t, reflect.TypeOf(l1.GetValue()), reflect.TypeOf(l2.GetValue()))
+ switch l1.GetValue().(type) {
case *core.Literal_Scalar:
EqualScalar(t, l1.GetScalar(), l2.GetScalar())
case *core.Literal_Map:
@@ -63,9 +63,9 @@ func EqualLiterals(t *testing.T, l1 *core.Literal, l2 *core.Literal) {
func EqualLiteralMap(t *testing.T, l1 *core.LiteralMap, l2 *core.LiteralMap) {
if assert.NotNil(t, l1, "l1 is nil") && assert.NotNil(t, l2, "l2 is nil") {
- assert.Equal(t, len(l1.Literals), len(l2.Literals))
- for k, v := range l1.Literals {
- actual, ok := l2.Literals[k]
+ assert.Equal(t, len(l1.GetLiterals()), len(l2.GetLiterals()))
+ for k, v := range l1.GetLiterals() {
+ actual, ok := l2.GetLiterals()[k]
assert.True(t, ok)
EqualLiterals(t, v, actual)
}
@@ -74,9 +74,9 @@ func EqualLiteralMap(t *testing.T, l1 *core.LiteralMap, l2 *core.LiteralMap) {
func EqualLiteralCollection(t *testing.T, l1 *core.LiteralCollection, l2 *core.LiteralCollection) {
if assert.NotNil(t, l2) {
- assert.Equal(t, len(l1.Literals), len(l2.Literals))
- for i, v := range l1.Literals {
- EqualLiterals(t, v, l2.Literals[i])
+ assert.Equal(t, len(l1.GetLiterals()), len(l2.GetLiterals()))
+ for i, v := range l1.GetLiterals() {
+ EqualLiterals(t, v, l2.GetLiterals()[i])
}
}
}
diff --git a/flytepropeller/pkg/utils/bindings_test.go b/flytepropeller/pkg/utils/bindings_test.go
index c6cb5fcc12..8067e256eb 100644
--- a/flytepropeller/pkg/utils/bindings_test.go
+++ b/flytepropeller/pkg/utils/bindings_test.go
@@ -18,7 +18,7 @@ func TestMakePrimitiveBinding(t *testing.T) {
assert.Equal(t, "x", x.GetVar())
p := x.GetBinding()
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue())
}
{
@@ -36,7 +36,7 @@ func TestMustMakePrimitiveBinding(t *testing.T) {
assert.Equal(t, "x", x.GetVar())
p := x.GetBinding()
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue())
}
{
@@ -62,26 +62,26 @@ func TestMakeBindingDataCollection(t *testing.T) {
)
assert.NotNil(t, c.GetCollection())
- assert.Equal(t, 2, len(c.GetCollection().Bindings))
+ assert.Equal(t, 2, len(c.GetCollection().GetBindings()))
{
p := c.GetCollection().GetBindings()[0]
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v1, p.GetScalar().GetPrimitive().GetInteger())
}
{
p := c.GetCollection().GetBindings()[1]
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v2, p.GetScalar().GetPrimitive().GetStringValue())
}
assert.NotNil(t, c2.GetCollection())
- assert.Equal(t, 2, len(c2.GetCollection().Bindings))
+ assert.Equal(t, 2, len(c2.GetCollection().GetBindings()))
{
p := c2.GetCollection().GetBindings()[0]
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v1, p.GetScalar().GetPrimitive().GetInteger())
}
{
@@ -113,7 +113,7 @@ func TestMakeBindingDataMap(t *testing.T) {
{
p := m.GetMap().GetBindings()["x"]
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v1, p.GetScalar().GetPrimitive().GetInteger())
}
{
@@ -127,7 +127,7 @@ func TestMakeBindingDataMap(t *testing.T) {
{
p := m2.GetMap().GetBindings()["x"]
assert.NotNil(t, p.GetScalar())
- assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String())
+ assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.GetScalar().GetPrimitive().GetValue()).String())
assert.Equal(t, v1, p.GetScalar().GetPrimitive().GetInteger())
}
{
diff --git a/flytepropeller/pkg/utils/k8s.go b/flytepropeller/pkg/utils/k8s.go
index f666fd9013..7ef53ead38 100644
--- a/flytepropeller/pkg/utils/k8s.go
+++ b/flytepropeller/pkg/utils/k8s.go
@@ -37,7 +37,7 @@ var invalidDNS1123Characters = regexp.MustCompile("[^-a-z0-9]+")
func ToK8sEnvVar(env []*core.KeyValuePair) []v1.EnvVar {
envVars := make([]v1.EnvVar, 0, len(env))
for _, kv := range env {
- envVars = append(envVars, v1.EnvVar{Name: kv.Key, Value: kv.Value})
+ envVars = append(envVars, v1.EnvVar{Name: kv.GetKey(), Value: kv.GetValue()})
}
return envVars
}
diff --git a/flytepropeller/pkg/visualize/visualize.go b/flytepropeller/pkg/visualize/visualize.go
index 6a5ee7ba11..14128fde84 100644
--- a/flytepropeller/pkg/visualize/visualize.go
+++ b/flytepropeller/pkg/visualize/visualize.go
@@ -34,11 +34,11 @@ func flatten(binding *core.BindingData, flatMap map[common.NodeID]sets.String) {
flatten(v, flatMap)
}
case *core.BindingData_Promise:
- if _, ok := flatMap[binding.GetPromise().NodeId]; !ok {
- flatMap[binding.GetPromise().NodeId] = sets.String{}
+ if _, ok := flatMap[binding.GetPromise().GetNodeId()]; !ok {
+ flatMap[binding.GetPromise().GetNodeId()] = sets.String{}
}
- flatMap[binding.GetPromise().NodeId].Insert(binding.GetPromise().GetVar())
+ flatMap[binding.GetPromise().GetNodeId()].Insert(binding.GetPromise().GetVar())
case *core.BindingData_Scalar:
if _, ok := flatMap[staticNodeID]; !ok {
flatMap[staticNodeID] = sets.NewString()
@@ -142,11 +142,11 @@ func WorkflowToGraphViz(g *v1alpha1.FlyteWorkflow) string {
func ToGraphViz(g *core.CompiledWorkflow) string {
res := fmt.Sprintf("digraph G {rankdir=TB;workflow[label=\"Workflow Id: %v\"];node[style=filled];",
- g.Template.GetId())
+ g.GetTemplate().GetId())
nodeFinder := func(nodeId common.NodeID) *core.Node {
- for _, n := range g.Template.Nodes {
- if n.Id == nodeId {
+ for _, n := range g.GetTemplate().GetNodes() {
+ if n.GetId() == nodeId {
return n
}
}
@@ -204,9 +204,9 @@ func ToGraphViz(g *core.CompiledWorkflow) string {
node := nodesToVisit.Deque()
nodes, found := g.GetConnections().GetDownstream()[node]
if found {
- nodesToVisit.Enqueue(nodes.Ids...)
+ nodesToVisit.Enqueue(nodes.GetIds()...)
- for _, child := range nodes.Ids {
+ for _, child := range nodes.GetIds() {
label := edgeLabel(node, child)
edge := fmt.Sprintf("\"%v\" -> \"%v\" [label=\"%v\",style=\"%v\"];",
nodeLabel(node),
diff --git a/flytepropeller/pkg/webhook/aws_secret_manager.go b/flytepropeller/pkg/webhook/aws_secret_manager.go
index d1595ffc1e..ad5e8c48f4 100644
--- a/flytepropeller/pkg/webhook/aws_secret_manager.go
+++ b/flytepropeller/pkg/webhook/aws_secret_manager.go
@@ -47,7 +47,7 @@ type AWSSecretManagerInjector struct {
}
func formatAWSSecretArn(secret *core.Secret) string {
- return strings.TrimRight(secret.Group, ":") + ":" + strings.TrimLeft(secret.Key, ":")
+ return strings.TrimRight(secret.GetGroup(), ":") + ":" + strings.TrimLeft(secret.GetKey(), ":")
}
func formatAWSInitContainerName(index int) string {
@@ -59,12 +59,12 @@ func (i AWSSecretManagerInjector) Type() config.SecretManagerType {
}
func (i AWSSecretManagerInjector) Inject(ctx context.Context, secret *core.Secret, p *corev1.Pod) (newP *corev1.Pod, injected bool, err error) {
- if len(secret.Group) == 0 || len(secret.Key) == 0 {
+ if len(secret.GetGroup()) == 0 || len(secret.GetKey()) == 0 {
return nil, false, fmt.Errorf("AWS Secrets Webhook require both key and group to be set. "+
"Secret: [%v]", secret)
}
- switch secret.MountRequirement {
+ switch secret.GetMountRequirement() {
case core.Secret_ANY:
fallthrough
case core.Secret_FILE:
@@ -112,7 +112,7 @@ func (i AWSSecretManagerInjector) Inject(ctx context.Context, secret *core.Secre
case core.Secret_ENV_VAR:
fallthrough
default:
- err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key)
+ err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey())
logger.Error(ctx, err)
return p, false, err
}
@@ -138,7 +138,7 @@ func createAWSSidecarContainer(cfg config.AWSSecretManagerConfig, p *corev1.Pod,
},
{
Name: AWSSecretFilenameEnvVar,
- Value: filepath.Join(string(filepath.Separator), strings.ToLower(secret.Group), strings.ToLower(secret.Key)),
+ Value: filepath.Join(string(filepath.Separator), strings.ToLower(secret.GetGroup()), strings.ToLower(secret.GetKey())),
},
},
Resources: cfg.Resources,
diff --git a/flytepropeller/pkg/webhook/aws_secret_manager_test.go b/flytepropeller/pkg/webhook/aws_secret_manager_test.go
index d2a74de80b..cb0c9ddba4 100644
--- a/flytepropeller/pkg/webhook/aws_secret_manager_test.go
+++ b/flytepropeller/pkg/webhook/aws_secret_manager_test.go
@@ -44,11 +44,11 @@ func TestAWSSecretManagerInjector_Inject(t *testing.T) {
Env: []corev1.EnvVar{
{
Name: "SECRET_ARN",
- Value: inputSecret.Group + ":" + inputSecret.Key,
+ Value: inputSecret.GetGroup() + ":" + inputSecret.GetKey(),
},
{
Name: "SECRET_FILENAME",
- Value: "/" + inputSecret.Group + "/" + inputSecret.Key,
+ Value: "/" + inputSecret.GetGroup() + "/" + inputSecret.GetKey(),
},
{
Name: "FLYTE_SECRETS_DEFAULT_DIR",
diff --git a/flytepropeller/pkg/webhook/gcp_secret_manager.go b/flytepropeller/pkg/webhook/gcp_secret_manager.go
index c69705594e..4db4a0d3ab 100644
--- a/flytepropeller/pkg/webhook/gcp_secret_manager.go
+++ b/flytepropeller/pkg/webhook/gcp_secret_manager.go
@@ -41,12 +41,12 @@ func formatGCPSecretAccessCommand(secret *core.Secret) []string {
// `gcloud` writes this file with permission 0600.
// This will cause permission issues in the main container when using non-root
// users, so we fix the file permissions with `chmod`.
- secretDir := strings.ToLower(filepath.Join(GCPSecretMountPath, secret.Group))
- secretPath := strings.ToLower(filepath.Join(secretDir, secret.GroupVersion))
+ secretDir := strings.ToLower(filepath.Join(GCPSecretMountPath, secret.GetGroup()))
+ secretPath := strings.ToLower(filepath.Join(secretDir, secret.GetGroupVersion()))
args := fmt.Sprintf(
"gcloud secrets versions access %[1]s/versions/%[2]s --out-file=%[4]s || gcloud secrets versions access %[2]s --secret=%[1]s --out-file=%[4]s; chmod +rX %[3]s %[4]s",
- secret.Group,
- secret.GroupVersion,
+ secret.GetGroup(),
+ secret.GetGroupVersion(),
secretDir,
secretPath,
)
@@ -62,12 +62,12 @@ func (i GCPSecretManagerInjector) Type() config.SecretManagerType {
}
func (i GCPSecretManagerInjector) Inject(ctx context.Context, secret *core.Secret, p *corev1.Pod) (newP *corev1.Pod, injected bool, err error) {
- if len(secret.Group) == 0 || len(secret.GroupVersion) == 0 {
+ if len(secret.GetGroup()) == 0 || len(secret.GetGroupVersion()) == 0 {
return nil, false, fmt.Errorf("GCP Secrets Webhook require both group and group version to be set. "+
"Secret: [%v]", secret)
}
- switch secret.MountRequirement {
+ switch secret.GetMountRequirement() {
case core.Secret_ANY:
fallthrough
case core.Secret_FILE:
@@ -115,7 +115,7 @@ func (i GCPSecretManagerInjector) Inject(ctx context.Context, secret *core.Secre
case core.Secret_ENV_VAR:
fallthrough
default:
- err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key)
+ err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey())
logger.Error(ctx, err)
return p, false, err
}
diff --git a/flytepropeller/pkg/webhook/global_secrets.go b/flytepropeller/pkg/webhook/global_secrets.go
index a4b3543fb1..21432f6ccc 100644
--- a/flytepropeller/pkg/webhook/global_secrets.go
+++ b/flytepropeller/pkg/webhook/global_secrets.go
@@ -35,20 +35,20 @@ func (g GlobalSecrets) Inject(ctx context.Context, secret *coreIdl.Secret, p *co
return p, false, err
}
- switch secret.MountRequirement {
+ switch secret.GetMountRequirement() {
case coreIdl.Secret_FILE:
return nil, false, fmt.Errorf("global secrets can only be injected as environment "+
- "variables [%v/%v]", secret.Group, secret.Key)
+ "variables [%v/%v]", secret.GetGroup(), secret.GetKey())
case coreIdl.Secret_ANY:
fallthrough
case coreIdl.Secret_ENV_VAR:
- if len(secret.Group) == 0 {
+ if len(secret.GetGroup()) == 0 {
return nil, false, fmt.Errorf("mounting a secret to env var requires selecting the "+
- "secret and a single key within. Key [%v]", secret.Key)
+ "secret and a single key within. Key [%v]", secret.GetKey())
}
envVar := corev1.EnvVar{
- Name: strings.ToUpper(K8sDefaultEnvVarPrefix + secret.Group + EnvVarGroupKeySeparator + secret.Key),
+ Name: strings.ToUpper(K8sDefaultEnvVarPrefix + secret.GetGroup() + EnvVarGroupKeySeparator + secret.GetKey()),
Value: v,
}
@@ -63,7 +63,7 @@ func (g GlobalSecrets) Inject(ctx context.Context, secret *coreIdl.Secret, p *co
p.Spec.InitContainers = AppendEnvVars(p.Spec.InitContainers, envVar)
p.Spec.Containers = AppendEnvVars(p.Spec.Containers, envVar)
default:
- err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key)
+ err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey())
logger.Error(ctx, err)
return p, false, err
}
diff --git a/flytepropeller/pkg/webhook/k8s_secrets.go b/flytepropeller/pkg/webhook/k8s_secrets.go
index 102d1ae6c1..68bb8669d2 100644
--- a/flytepropeller/pkg/webhook/k8s_secrets.go
+++ b/flytepropeller/pkg/webhook/k8s_secrets.go
@@ -38,12 +38,12 @@ func (i K8sSecretInjector) Type() config.SecretManagerType {
}
func (i K8sSecretInjector) Inject(ctx context.Context, secret *core.Secret, p *corev1.Pod) (newP *corev1.Pod, injected bool, err error) {
- if len(secret.Group) == 0 || len(secret.Key) == 0 {
+ if len(secret.GetGroup()) == 0 || len(secret.GetKey()) == 0 {
return nil, false, fmt.Errorf("k8s Secrets Webhook require both key and group to be set. "+
"Secret: [%v]", secret)
}
- switch secret.MountRequirement {
+ switch secret.GetMountRequirement() {
case core.Secret_ANY:
fallthrough
case core.Secret_FILE:
@@ -88,7 +88,7 @@ func (i K8sSecretInjector) Inject(ctx context.Context, secret *core.Secret, p *c
p.Spec.InitContainers = AppendEnvVars(p.Spec.InitContainers, prefixEnvVar)
p.Spec.Containers = AppendEnvVars(p.Spec.Containers, prefixEnvVar)
default:
- err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key)
+ err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey())
logger.Error(ctx, err)
return p, false, err
}
diff --git a/flytepropeller/pkg/webhook/utils.go b/flytepropeller/pkg/webhook/utils.go
index 92a4995c24..9d40cbbe6f 100644
--- a/flytepropeller/pkg/webhook/utils.go
+++ b/flytepropeller/pkg/webhook/utils.go
@@ -26,13 +26,13 @@ func hasEnvVar(envVars []corev1.EnvVar, envVarKey string) bool {
func CreateEnvVarForSecret(secret *core.Secret) corev1.EnvVar {
optional := true
return corev1.EnvVar{
- Name: strings.ToUpper(K8sDefaultEnvVarPrefix + secret.Group + EnvVarGroupKeySeparator + secret.Key),
+ Name: strings.ToUpper(K8sDefaultEnvVarPrefix + secret.GetGroup() + EnvVarGroupKeySeparator + secret.GetKey()),
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
- Name: secret.Group,
+ Name: secret.GetGroup(),
},
- Key: secret.Key,
+ Key: secret.GetKey(),
Optional: &optional,
},
},
@@ -43,14 +43,14 @@ func CreateVolumeForSecret(secret *core.Secret) corev1.Volume {
optional := true
return corev1.Volume{
// we don't want to create different volume for the same secret group
- Name: encoding.Base32Encoder.EncodeToString([]byte(secret.Group + EnvVarGroupKeySeparator + secret.GroupVersion)),
+ Name: encoding.Base32Encoder.EncodeToString([]byte(secret.GetGroup() + EnvVarGroupKeySeparator + secret.GetGroupVersion())),
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
- SecretName: secret.Group,
+ SecretName: secret.GetGroup(),
Items: []corev1.KeyToPath{
{
- Key: secret.Key,
- Path: strings.ToLower(secret.Key),
+ Key: secret.GetKey(),
+ Path: strings.ToLower(secret.GetKey()),
},
},
Optional: &optional,
@@ -63,7 +63,7 @@ func CreateVolumeMountForSecret(volumeName string, secret *core.Secret) corev1.V
return corev1.VolumeMount{
Name: volumeName,
ReadOnly: true,
- MountPath: filepath.Join(filepath.Join(K8sSecretPathPrefix...), strings.ToLower(secret.Group)),
+ MountPath: filepath.Join(filepath.Join(K8sSecretPathPrefix...), strings.ToLower(secret.GetGroup())),
}
}
@@ -130,15 +130,15 @@ func CreateVaultAnnotationsForSecret(secret *core.Secret, kvversion config.KVVer
id := string(uuid.NewUUID())
secretVaultAnnotations := map[string]string{
- fmt.Sprintf("vault.hashicorp.com/agent-inject-secret-%s", id): secret.Group,
- fmt.Sprintf("vault.hashicorp.com/agent-inject-file-%s", id): fmt.Sprintf("%s/%s", secret.Group, secret.Key),
+ fmt.Sprintf("vault.hashicorp.com/agent-inject-secret-%s", id): secret.GetGroup(),
+ fmt.Sprintf("vault.hashicorp.com/agent-inject-file-%s", id): fmt.Sprintf("%s/%s", secret.GetGroup(), secret.GetKey()),
}
// Set the consul template language query depending on the KV Secrets Engine version.
// Version 1 stores plain k:v pairs under .Data, version 2 supports versioned secrets
// and wraps the k:v pairs into an additional subfield.
var query string
- switch secret.GroupVersion {
+ switch secret.GetGroupVersion() {
case "kv1":
query = ".Data"
case "kv2":
@@ -157,7 +157,7 @@ func CreateVaultAnnotationsForSecret(secret *core.Secret, kvversion config.KVVer
}
}
if query != "" {
- template := fmt.Sprintf(`{{- with secret "%s" -}}{{ %s.%s }}{{- end -}}`, secret.Group, query, secret.Key)
+ template := fmt.Sprintf(`{{- with secret "%s" -}}{{ %s.%s }}{{- end -}}`, secret.GetGroup(), query, secret.GetKey())
secretVaultAnnotations[fmt.Sprintf("vault.hashicorp.com/agent-inject-template-%s", id)] = template
}
diff --git a/flytepropeller/pkg/webhook/vault_secret_manager.go b/flytepropeller/pkg/webhook/vault_secret_manager.go
index 658e3970d1..e5430153d6 100644
--- a/flytepropeller/pkg/webhook/vault_secret_manager.go
+++ b/flytepropeller/pkg/webhook/vault_secret_manager.go
@@ -35,12 +35,12 @@ func (i VaultSecretManagerInjector) Type() config.SecretManagerType {
}
func (i VaultSecretManagerInjector) Inject(ctx context.Context, secret *coreIdl.Secret, p *corev1.Pod) (newP *corev1.Pod, injected bool, err error) {
- if len(secret.Group) == 0 || len(secret.Key) == 0 {
+ if len(secret.GetGroup()) == 0 || len(secret.GetKey()) == 0 {
return nil, false, fmt.Errorf("Vault Secrets Webhook requires both key and group to be set. "+
"Secret: [%v]", secret)
}
- switch secret.MountRequirement {
+ switch secret.GetMountRequirement() {
case coreIdl.Secret_ANY:
fallthrough
case coreIdl.Secret_FILE:
@@ -76,7 +76,7 @@ func (i VaultSecretManagerInjector) Inject(ctx context.Context, secret *coreIdl.
case coreIdl.Secret_ENV_VAR:
return p, false, fmt.Errorf("Env_Var is not a supported mount requirement for Vault Secret Manager")
default:
- err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.MountRequirement.String(), secret.Key)
+ err := fmt.Errorf("unrecognized mount requirement [%v] for secret [%v]", secret.GetMountRequirement().String(), secret.GetKey())
logger.Error(ctx, err)
return p, false, err
}
diff --git a/flytestdlib/.golangci.yml b/flytestdlib/.golangci.yml
index e3bff2320b..4ae605454b 100644
--- a/flytestdlib/.golangci.yml
+++ b/flytestdlib/.golangci.yml
@@ -1,34 +1,24 @@
-# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES.
-# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY:
-#
-# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst
-
run:
skip-dirs:
- pkg/client
-
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- - gas
+ - gosec
- gci
- goconst
- goimports
- - golint
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- staticcheck
- - structcheck
- typecheck
- unconvert
- unused
- - varcheck
-
+ - protogetter
linters-settings:
gci:
custom-order: true
@@ -37,3 +27,5 @@ linters-settings:
- default
- prefix(github.com/flyteorg)
skip-generated: true
+ goconst:
+ ignore-tests: true
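Note on the linter set: the .golangci.yml rewrite drops the deprecated deadcode, golint, structcheck, and varcheck linters (golangci-lint has since removed them; unused and staticcheck cover most of the same ground), references gas under its current name gosec, and newly enables protogetter, the linter behind the Get*() rewrites throughout this patch. The goconst ignore-tests: true setting stops repeated string literals in _test.go files from being flagged. A small illustration (not Flyte code) of the duplication goconst reports in non-test code:

package main

import "fmt"

// statusFailed is the constant goconst would suggest once the same literal
// appears enough times in non-test code.
const statusFailed = "FAILED"

func report(phase string) {
	if phase == statusFailed { // compare against the constant, not a repeated "FAILED" literal
		fmt.Println("workflow reached phase:", statusFailed)
	}
}

func main() {
	report("FAILED")
}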
diff --git a/flytestdlib/bitarray/bitset.go b/flytestdlib/bitarray/bitset.go
index 883b9ded65..be957fecb3 100644
--- a/flytestdlib/bitarray/bitset.go
+++ b/flytestdlib/bitarray/bitset.go
@@ -14,6 +14,7 @@ type BitSet []Block
// Ensures that the given bit is set in the BitSet.
func (s *BitSet) Set(i uint) {
+ // #nosec G115
if len(*s) < int(i/blockSize+1) {
*s = append(*s, make([]Block, i/blockSize+1)...)
}
@@ -23,6 +24,7 @@ func (s *BitSet) Set(i uint) {
// Ensures that the given bit is cleared (unset) in the BitSet.
func (s *BitSet) Clear(i uint) {
+ // #nosec G115
if len(*s) >= int(i/blockSize+1) {
(*s)[i/blockSize] &^= 1 << (i % blockSize)
}
@@ -30,6 +32,7 @@ func (s *BitSet) Clear(i uint) {
// Returns true if the given bit is set, false if it is cleared.
func (s *BitSet) IsSet(i uint) bool {
+ // #nosec G115
if len(*s) < int(i/blockSize+1) {
return false
}
@@ -44,7 +47,8 @@ func (s *BitSet) BlockCount() int {
// Returns the length of the BitSet.
func (s *BitSet) Cap() uint {
- return uint(s.BlockCount()) * blockSize
+ return uint(s.BlockCount()) * blockSize // #nosec G115
+
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
diff --git a/flytestdlib/bitarray/bitset_test.go b/flytestdlib/bitarray/bitset_test.go
index 72e91f70d0..60572fbe3c 100644
--- a/flytestdlib/bitarray/bitset_test.go
+++ b/flytestdlib/bitarray/bitset_test.go
@@ -58,11 +58,11 @@ func TestNewBitSet(t *testing.T) {
func TestBitSet_Cap(t *testing.T) {
t.Run("Cap == size", func(t *testing.T) {
b := NewBitSet(blockSize * 5)
- assert.Equal(t, int(blockSize*5), int(b.Cap()))
+ assert.Equal(t, int(blockSize*5), int(b.Cap())) // #nosec G115
})
t.Run("Cap > size", func(t *testing.T) {
b := NewBitSet(blockSize*2 + 20)
- assert.Equal(t, int(blockSize*3), int(b.Cap()))
+ assert.Equal(t, int(blockSize*3), int(b.Cap())) // #nosec G115
})
}
diff --git a/flytestdlib/bitarray/compact_array.go b/flytestdlib/bitarray/compact_array.go
index 827c8c8532..f2d562a786 100644
--- a/flytestdlib/bitarray/compact_array.go
+++ b/flytestdlib/bitarray/compact_array.go
@@ -39,13 +39,16 @@ func (a *CompactArray) validateValue(value Item) {
func (a *CompactArray) SetItem(index int, value Item) {
a.validateIndex(index)
a.validateValue(value)
- bitIndex := uint(index) * a.ItemSize
+ bitIndex := uint(index) * a.ItemSize // #nosec G115
x := Item(1)
+ // #nosec G115
for i := int(a.ItemSize - 1); i >= 0; i-- {
if x&value != 0 {
- a.BitSet.Set(bitIndex + uint(i))
+ a.BitSet.Set(bitIndex + uint(i)) // #nosec G115
+
} else {
- a.BitSet.Clear(bitIndex + uint(i))
+ a.BitSet.Clear(bitIndex + uint(i)) // #nosec G115
+
}
x <<= 1
@@ -55,10 +58,12 @@ func (a *CompactArray) SetItem(index int, value Item) {
// Gets Item at provided index.
func (a *CompactArray) GetItem(index int) Item {
a.validateIndex(index)
- bitIndex := uint(index) * a.ItemSize
+ bitIndex := uint(index) * a.ItemSize // #nosec G115
res := Item(0)
x := Item(1)
+ // #nosec G115
for i := int(a.ItemSize - 1); i >= 0; i-- {
+ // #nosec G115
if a.BitSet.IsSet(bitIndex + uint(i)) {
res |= x
}
@@ -72,8 +77,9 @@ func (a *CompactArray) GetItem(index int) Item {
// Gets all items stored in the array. The size of the returned array matches the ItemsCount it was initialized with.
func (a CompactArray) GetItems() []Item {
res := make([]Item, 0, a.ItemsCount)
+ // #nosec G115
for i := 0; i < int(a.ItemsCount); i++ {
- res = append(res, a.GetItem(i))
+ res = append(res, a.GetItem(i)) // #nosec G115
}
return res
diff --git a/flytestdlib/bitarray/compact_array_test.go b/flytestdlib/bitarray/compact_array_test.go
index 7d41ee7b41..8d69f839d9 100644
--- a/flytestdlib/bitarray/compact_array_test.go
+++ b/flytestdlib/bitarray/compact_array_test.go
@@ -32,6 +32,7 @@ func TestNewItemArray(t *testing.T) {
arr, err := NewCompactArray(itemsCount, Item(1)<<(itemSize-1))
assert.NoError(t, err)
+ // #nosec G115
for i := 0; i < int(itemsCount); i++ {
// Ensure inserted items is in the accepted range (0 -> 1<