From 44f565f34e70a175f81b882143ec3ad97574176e Mon Sep 17 00:00:00 2001 From: Evan Johnson Date: Thu, 25 Apr 2024 13:56:42 -0400 Subject: [PATCH 1/3] update provider name in documentation to linode-linode --- Makefile | 2 +- docs/src/developers/development.md | 12 ++++++------ docs/src/topics/backups.md | 2 +- docs/src/topics/etcd.md | 2 +- docs/src/topics/flavors/cluster-autoscaler.md | 2 +- docs/src/topics/flavors/clusterclass-kubeadm.md | 2 +- docs/src/topics/flavors/default.md | 2 +- docs/src/topics/flavors/dual-stack.md | 2 +- docs/src/topics/flavors/etcd-backup-restore.md | 2 +- docs/src/topics/flavors/etcd-disk.md | 2 +- docs/src/topics/flavors/k3s.md | 2 +- docs/src/topics/flavors/rke2.md | 2 +- docs/src/topics/getting-started.md | 4 ++-- docs/src/topics/health-checking.md | 2 +- 14 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Makefile b/Makefile index 7e3982f5c..196011b82 100644 --- a/Makefile +++ b/Makefile @@ -239,7 +239,7 @@ release-manifests: $(KUSTOMIZE) $(RELEASE_DIR) .PHONY: local-release local-release: - RELEASE_DIR=infrastructure-linode/v0.0.0 $(MAKE) release + RELEASE_DIR=infrastructure-local-linode/v0.0.0 $(MAKE) release $(MAKE) clean-release-git ## -------------------------------------- diff --git a/docs/src/developers/development.md b/docs/src/developers/development.md index a483623d5..6a766841c 100644 --- a/docs/src/developers/development.md +++ b/docs/src/developers/development.md @@ -150,10 +150,10 @@ For local development, templates should be generated via: make local-release ``` -This creates `infrastructure-linode/v0.0.0/` with all the cluster templates: +This creates `infrastructure-local-linode/v0.0.0/` with all the cluster templates: ```sh -infrastructure-linode/v0.0.0 +infrastructure-local-linode/v0.0.0 ├── cluster-template-clusterclass-kubeadm.yaml ├── cluster-template-etcd-backup-restore.yaml ├── cluster-template-k3s.yaml @@ -169,7 +169,7 @@ This can then be used with `clusterctl` by adding the following to 
`~/.clusterct ``` providers: - - name: akamai-linode + - name: local-linode url: ${HOME}/cluster-api-provider-linode/infrastructure-linode/v0.0.0/infrastructure-components.yaml type: InfrastructureProvider ``` @@ -194,7 +194,7 @@ export LINODE_MACHINE_TYPE=g6-standard-2 You can also use `clusterctl generate` to see which variables need to be set: ``` -clusterctl generate cluster $CLUSTER_NAME --infrastructure akamai-linode:v0.0.0 [--flavor ] --list-variables +clusterctl generate cluster $CLUSTER_NAME --infrastructure local-linode:v0.0.0 [--flavor ] --list-variables ``` ~~~ @@ -209,7 +209,7 @@ you can deploy a workload cluster with the default flavor: ```sh clusterctl generate cluster $CLUSTER_NAME \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-akamai-linode:v0.0.0 \ + --infrastructure local-linode:v0.0.0 \ | kubectl apply -f - ``` @@ -229,7 +229,7 @@ management cluster has the [ClusterTopology feature gate set](https://cluster-ap ```sh clusterctl generate cluster $CLUSTER_NAME \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode:v0.0.0 \ + --infrastructure local-linode:v0.0.0 \ --flavor clusterclass-kubeadm \ | kubectl apply -f - ``` diff --git a/docs/src/topics/backups.md b/docs/src/topics/backups.md index 0f41de5d4..32953fe04 100644 --- a/docs/src/topics/backups.md +++ b/docs/src/topics/backups.md @@ -12,7 +12,7 @@ To enable backups, use the addon flag during provisioning to select the etcd-bac ```sh clusterctl generate cluster $CLUSTER_NAME \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor etcd-backup-restore \ | kubectl apply -f - ``` diff --git a/docs/src/topics/etcd.md b/docs/src/topics/etcd.md index e69ac014a..bc64520bc 100644 --- a/docs/src/topics/etcd.md +++ b/docs/src/topics/etcd.md @@ -39,7 +39,7 @@ export ETCDBR_IMAGE=docker.io/username/your-custom-image:version export SSE_KEY=cdQdZ3PrKgm5vmqxeqwQCuAWJ7pPVyHg clusterctl generate cluster $CLUSTER_NAME \ 
--kubernetes-version v1.29.1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor etcd-backup-restore \ | kubectl apply -f - ``` diff --git a/docs/src/topics/flavors/cluster-autoscaler.md b/docs/src/topics/flavors/cluster-autoscaler.md index ef7bfc39c..14d776c9e 100644 --- a/docs/src/topics/flavors/cluster-autoscaler.md +++ b/docs/src/topics/flavors/cluster-autoscaler.md @@ -33,7 +33,7 @@ Autoscaler](https://www.github.com/kubernetes/autoscaler/tree/master/cluster-aut ```sh clusterctl generate cluster test-cluster \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor cluster-autoscaler > test-cluster.yaml ``` diff --git a/docs/src/topics/flavors/clusterclass-kubeadm.md b/docs/src/topics/flavors/clusterclass-kubeadm.md index 37853c2bf..efdd7cc5b 100644 --- a/docs/src/topics/flavors/clusterclass-kubeadm.md +++ b/docs/src/topics/flavors/clusterclass-kubeadm.md @@ -11,7 +11,7 @@ ```bash clusterctl generate cluster test-cluster \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor clusterclass-kubeadm > test-cluster.yaml ``` 2. Apply cluster manifests diff --git a/docs/src/topics/flavors/default.md b/docs/src/topics/flavors/default.md index 89680bdf5..151eaf6bf 100644 --- a/docs/src/topics/flavors/default.md +++ b/docs/src/topics/flavors/default.md @@ -10,7 +10,7 @@ ```bash clusterctl generate cluster test-cluster \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode > test-cluster.yaml + --infrastructure linode-linode > test-cluster.yaml ``` 2. 
Apply cluster yaml ```bash diff --git a/docs/src/topics/flavors/dual-stack.md b/docs/src/topics/flavors/dual-stack.md index 1a5624480..2565a53bd 100644 --- a/docs/src/topics/flavors/dual-stack.md +++ b/docs/src/topics/flavors/dual-stack.md @@ -10,7 +10,7 @@ ```bash clusterctl generate cluster test-cluster \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor dual-stack > test-cluster.yaml ``` 2. Apply cluster yaml diff --git a/docs/src/topics/flavors/etcd-backup-restore.md b/docs/src/topics/flavors/etcd-backup-restore.md index ae097f978..74c9133b6 100644 --- a/docs/src/topics/flavors/etcd-backup-restore.md +++ b/docs/src/topics/flavors/etcd-backup-restore.md @@ -12,7 +12,7 @@ ```bash clusterctl generate cluster test-cluster \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor etcd-backup-restore > test-cluster.yaml ``` 2. Apply cluster yaml diff --git a/docs/src/topics/flavors/etcd-disk.md b/docs/src/topics/flavors/etcd-disk.md index f0036de50..e76ad757c 100644 --- a/docs/src/topics/flavors/etcd-disk.md +++ b/docs/src/topics/flavors/etcd-disk.md @@ -18,7 +18,7 @@ the `quota-backend-bytes` to `8589934592` (8 GiB) per recommendation from ```bash clusterctl generate cluster test-cluster \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor etcd-disk > test-cluster.yaml ``` 2. Apply cluster yaml diff --git a/docs/src/topics/flavors/k3s.md b/docs/src/topics/flavors/k3s.md index afcfa11db..54595a4f7 100644 --- a/docs/src/topics/flavors/k3s.md +++ b/docs/src/topics/flavors/k3s.md @@ -27,7 +27,7 @@ ```bash clusterctl generate cluster test-cluster \ --kubernetes-version v1.29.1+k3s2 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor k3s > test-k3s-cluster.yaml ``` 2. 
Apply cluster yaml diff --git a/docs/src/topics/flavors/rke2.md b/docs/src/topics/flavors/rke2.md index ec7a4fcb0..dd090803f 100644 --- a/docs/src/topics/flavors/rke2.md +++ b/docs/src/topics/flavors/rke2.md @@ -25,7 +25,7 @@ will not work for RKE2 versions >= v1.29. ```bash clusterctl generate cluster test-cluster \ --kubernetes-version v1.29.1+rke2r1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor rke2 > test-rke2-cluster.yaml ``` 2. Apply cluster yaml diff --git a/docs/src/topics/getting-started.md b/docs/src/topics/getting-started.md index 658edfd84..618f86b38 100644 --- a/docs/src/topics/getting-started.md +++ b/docs/src/topics/getting-started.md @@ -40,7 +40,7 @@ By default, clusters are provisioned within VPC. For Regions which do not have [ 1. Add `linode` as an infrastructure provider in `~/.cluster-api/clusterctl.yaml` ```yaml providers: - - name: akamai-linode + - name: linode-linode url: https://github.com/linode/cluster-api-provider-linode/releases/latest/infrastructure-components.yaml type: InfrastructureProvider ``` @@ -48,7 +48,7 @@ By default, clusters are provisioned within VPC. 
For Regions which do not have [ ## Install CAPL on your management cluster Install CAPL and enable the helm addon provider which is used by the majority of the CAPL flavors ```bash -clusterctl init --infrastructure akamai-linode --addon helm +clusterctl init --infrastructure linode-linode --addon helm ``` ## Deploying your first cluster diff --git a/docs/src/topics/health-checking.md b/docs/src/topics/health-checking.md index dafb351ee..0fe35f01b 100644 --- a/docs/src/topics/health-checking.md +++ b/docs/src/topics/health-checking.md @@ -10,7 +10,7 @@ using the `self-healing` flavor is the quickest way to get started: ```sh clusterctl generate cluster $CLUSTER_NAME \ --kubernetes-version v1.29.1 \ - --infrastructure akamai-linode \ + --infrastructure linode-linode \ --flavor self-healing \ | kubectl apply -f - ``` From 75c6a5dddb5190a3257043562cf8108b64e864f7 Mon Sep 17 00:00:00 2001 From: bcm820 Date: Fri, 26 Apr 2024 11:41:31 -0400 Subject: [PATCH 2/3] Add mocktest module (#255) --- .golangci.yml | 5 + Makefile | 2 +- cloud/scope/client.go | 8 +- cloud/scope/cluster.go | 4 +- cloud/scope/cluster_test.go | 24 +- cloud/scope/common.go | 2 +- cloud/scope/common_test.go | 2 +- cloud/scope/machine.go | 8 +- cloud/scope/machine_test.go | 613 ++++------- cloud/scope/object_storage_bucket.go | 14 +- cloud/scope/object_storage_bucket_test.go | 104 +- cloud/scope/vpc.go | 4 +- cloud/scope/vpc_test.go | 24 +- cmd/main.go | 12 +- .../linodemachine_controller_helpers_test.go | 18 +- .../linodeobjectstoragebucket_controller.go | 18 +- ...nodeobjectstoragebucket_controller_test.go | 998 ++++++++---------- docs/src/developers/testing.md | 166 +++ mock/client.go | 100 +- mock/common.go | 41 + mock/mocktest/mock.go | 60 ++ mock/mocktest/mock_test.go | 25 + mock/mocktest/node.go | 143 +++ mock/mocktest/node_test.go | 23 + mock/mocktest/path.go | 120 +++ mock/mocktest/path_test.go | 588 +++++++++++ mock/mocktest/suite.go | 144 +++ mock/mocktest/suite_test.go | 136 +++ 28 files 
changed, 2254 insertions(+), 1152 deletions(-) create mode 100644 mock/common.go create mode 100644 mock/mocktest/mock.go create mode 100644 mock/mocktest/mock_test.go create mode 100644 mock/mocktest/node.go create mode 100644 mock/mocktest/node_test.go create mode 100644 mock/mocktest/path.go create mode 100644 mock/mocktest/path_test.go create mode 100644 mock/mocktest/suite.go create mode 100644 mock/mocktest/suite_test.go diff --git a/.golangci.yml b/.golangci.yml index 927fc843e..b7fe690b0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -220,6 +220,11 @@ issues: - path: controller/suite_test.go linters: - gci + # Exclude goimports check for controller tests that import both mocktest and ginkgo/gomega as dot imports. + # goimports wants mocktest as a dot import in a separate group, but gci wants them in the same group. + - path: controller/.*_controller_test.go + linters: + - goimports # These are performance optimisations rather than style issues per se. # They warn when function arguments or range values copy a lot of memory diff --git a/Makefile b/Makefile index 196011b82..eb4a8edd6 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ docs: .PHONY: test test: generate fmt vet envtest ## Run tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(CACHE_BIN) -p path)" go test -race -timeout 60s `go list ./... | grep -v ./mock` -coverprofile cover.out.tmp + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(CACHE_BIN) -p path)" go test -race -timeout 60s `go list ./... 
| grep -v ./mock$$` -coverprofile cover.out.tmp grep -v "zz_generated.deepcopy.go" cover.out.tmp > cover.out rm cover.out.tmp diff --git a/cloud/scope/client.go b/cloud/scope/client.go index 404a92c7d..d49d3069b 100644 --- a/cloud/scope/client.go +++ b/cloud/scope/client.go @@ -59,12 +59,6 @@ type LinodeObjectStorageClient interface { DeleteObjectStorageKey(ctx context.Context, keyID int) error } -type LinodeObjectStorageClientBuilder func(apiKey string) (LinodeObjectStorageClient, error) - -func CreateLinodeObjectStorageClient(apiKey string) (LinodeObjectStorageClient, error) { - return CreateLinodeClient(apiKey) -} - -type k8sClient interface { +type K8sClient interface { client.Client } diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 5e3e6e2ee..0573bf89c 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -30,7 +30,7 @@ import ( // ClusterScopeParams defines the input parameters used to create a new Scope. type ClusterScopeParams struct { - Client k8sClient + Client K8sClient Cluster *clusterv1.Cluster LinodeCluster *infrav1alpha1.LinodeCluster } @@ -82,7 +82,7 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara // ClusterScope defines the basic context for an actuator to operate upon. 
type ClusterScope struct { - client k8sClient + client K8sClient PatchHelper *patch.Helper LinodeClient LinodeNodeBalancerClient Cluster *clusterv1.Cluster diff --git a/cloud/scope/cluster_test.go b/cloud/scope/cluster_test.go index f18ecd405..f85602a0a 100644 --- a/cloud/scope/cluster_test.go +++ b/cloud/scope/cluster_test.go @@ -102,7 +102,7 @@ func TestClusterScopeMethods(t *testing.T) { tests := []struct { name string fields fields - expects func(mock *mock.Mockk8sClient) + expects func(mock *mock.MockK8sClient) }{ { name: "Success - finalizer should be added to the Linode Cluster object", @@ -114,7 +114,7 @@ func TestClusterScopeMethods(t *testing.T) { }, }, }, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -134,7 +134,7 @@ func TestClusterScopeMethods(t *testing.T) { }, }, }, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -151,7 +151,7 @@ func TestClusterScopeMethods(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockK8sClient := mock.NewMockk8sClient(ctrl) + mockK8sClient := mock.NewMockK8sClient(ctrl) testcase.expects(mockK8sClient) @@ -188,7 +188,7 @@ func TestNewClusterScope(t *testing.T) { name string args args expectedError error - expects func(mock *mock.Mockk8sClient) + expects func(mock *mock.MockK8sClient) }{ { name: "Success - Pass in valid args and get a valid ClusterScope", @@ -200,7 +200,7 @@ func TestNewClusterScope(t *testing.T) { }, }, expectedError: nil, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -226,7 +226,7 @@ func TestNewClusterScope(t 
*testing.T) { }, }, expectedError: nil, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -250,7 +250,7 @@ func TestNewClusterScope(t *testing.T) { params: ClusterScopeParams{}, }, expectedError: fmt.Errorf("cluster is required when creating a ClusterScope"), - expects: func(mock *mock.Mockk8sClient) {}, + expects: func(mock *mock.MockK8sClient) {}, }, { name: "Error - patchHelper returns error. Checking error handle for when new patchHelper is invoked", @@ -262,7 +262,7 @@ func TestNewClusterScope(t *testing.T) { }, }, expectedError: fmt.Errorf("failed to init patch helper:"), - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().Return(runtime.NewScheme()) }, }, @@ -284,7 +284,7 @@ func TestNewClusterScope(t *testing.T) { }, }, expectedError: fmt.Errorf("credentials from secret ref: get credentials secret test/example: failed to get secret"), - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("failed to get secret")) }, }, @@ -298,7 +298,7 @@ func TestNewClusterScope(t *testing.T) { }, }, expectedError: fmt.Errorf("failed to create linode client: missing Linode API key"), - expects: func(mock *mock.Mockk8sClient) {}, + expects: func(mock *mock.MockK8sClient) {}, }, } @@ -310,7 +310,7 @@ func TestNewClusterScope(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockK8sClient := mock.NewMockk8sClient(ctrl) + mockK8sClient := mock.NewMockK8sClient(ctrl) testcase.expects(mockK8sClient) diff --git a/cloud/scope/common.go b/cloud/scope/common.go index 3e912f23e..53c06c2cd 100644 --- a/cloud/scope/common.go +++ b/cloud/scope/common.go @@ -33,7 +33,7 @@ func CreateLinodeClient(apiKey string) (*linodego.Client, error) { return 
&linodeClient, nil } -func getCredentialDataFromRef(ctx context.Context, crClient k8sClient, credentialsRef corev1.SecretReference, defaultNamespace string) ([]byte, error) { +func getCredentialDataFromRef(ctx context.Context, crClient K8sClient, credentialsRef corev1.SecretReference, defaultNamespace string) ([]byte, error) { secretRef := client.ObjectKey{ Name: credentialsRef.Name, Namespace: credentialsRef.Namespace, diff --git a/cloud/scope/common_test.go b/cloud/scope/common_test.go index 7eec6e8aa..0b6f62ead 100644 --- a/cloud/scope/common_test.go +++ b/cloud/scope/common_test.go @@ -164,7 +164,7 @@ func TestGetCredentialDataFromRef(t *testing.T) { defer ctrl.Finish() // Create an instance of the mock K8sClient - mockClient := mock.NewMockk8sClient(ctrl) + mockClient := mock.NewMockK8sClient(ctrl) // Setup Expected behaviour mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(testCase.args.funcBehavior) diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index 4b7ac818b..f05436f92 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -15,7 +15,7 @@ import ( ) type MachineScopeParams struct { - Client k8sClient + Client K8sClient Cluster *clusterv1.Cluster Machine *clusterv1.Machine LinodeCluster *infrav1alpha1.LinodeCluster @@ -23,7 +23,7 @@ type MachineScopeParams struct { } type MachineScope struct { - Client k8sClient + Client K8sClient PatchHelper *patch.Helper Cluster *clusterv1.Cluster Machine *clusterv1.Machine @@ -34,7 +34,7 @@ type MachineScope struct { func validateMachineScopeParams(params MachineScopeParams) error { if params.Cluster == nil { - return errors.New("custer is required when creating a MachineScope") + return errors.New("cluster is required when creating a MachineScope") } if params.Machine == nil { return errors.New("machine is required when creating a MachineScope") @@ -77,7 +77,7 @@ func NewMachineScope(ctx context.Context, apiKey string, params MachineScopePara if credentialRef != 
nil { data, err := getCredentialDataFromRef(ctx, params.Client, *credentialRef, defaultNamespace) if err != nil { - return nil, fmt.Errorf("credentials from cluster secret ref: %w", err) + return nil, fmt.Errorf("credentials from secret ref: %w", err) } apiKey = string(data) } diff --git a/cloud/scope/machine_test.go b/cloud/scope/machine_test.go index a355a69d8..c59951fa1 100644 --- a/cloud/scope/machine_test.go +++ b/cloud/scope/machine_test.go @@ -5,20 +5,22 @@ import ( "errors" "testing" - "github.com/linode/linodego" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" "github.com/linode/cluster-api-provider-linode/mock" + + . "github.com/linode/cluster-api-provider-linode/mock/mocktest" ) func TestValidateMachineScopeParams(t *testing.T) { @@ -107,128 +109,181 @@ func TestValidateMachineScopeParams(t *testing.T) { } } -func TestMachineScopeMethods(t *testing.T) { +func TestMachineScopeAddFinalizer(t *testing.T) { t.Parallel() - type fields struct { - LinodeMachine *infrav1alpha1.LinodeMachine - } - tests := []struct { - name string - fields fields - expects func(mock *mock.Mockk8sClient) - }{ - // TODO: Add test cases. 
- { - "Success - finalizer should be added to the Linode Machine object", - fields{ - LinodeMachine: &infrav1alpha1.LinodeMachine{}, - }, - func(mock *mock.Mockk8sClient) { - mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { - s := runtime.NewScheme() - infrav1alpha1.AddToScheme(s) - return s - }).Times(2) - mock.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - }, - }, - { - "AddFinalizer error - finalizer should not be added to the Linode Machine object. Function returns nil since it was already present", - fields{ - LinodeMachine: &infrav1alpha1.LinodeMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-machine", - Finalizers: []string{infrav1alpha1.GroupVersion.String()}, - }, - }, - }, - func(mock *mock.Mockk8sClient) { - mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + + NewSuite(t, mock.MockK8sClient{}).Run( + Call("scheme 1", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + s := runtime.NewScheme() + infrav1alpha1.AddToScheme(s) + return s + }) + }), + OneOf( + Path(Call("scheme 2", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) return s - }).Times(1) - }, - }, - } - for _, tt := range tests { - testcase := tt - t.Run(testcase.name, func(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockK8sClient := mock.NewMockk8sClient(ctrl) - - testcase.expects(mockK8sClient) - - mScope, err := NewMachineScope( - context.Background(), - "test-key", - MachineScopeParams{ - Client: mockK8sClient, + }) + })), + Path(Result("has finalizer", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, - LinodeMachine: 
testcase.fields.LinodeMachine, - }, - ) - if err != nil { - t.Errorf("NewMachineScope() error = %v", err) - } - - if err := mScope.AddFinalizer(context.Background()); err != nil { - t.Errorf("MachineScope.AddFinalizer() error = %v", err) - } + LinodeMachine: &infrav1alpha1.LinodeMachine{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{infrav1alpha1.GroupVersion.String()}, + }, + }, + }) + require.NoError(t, err) + assert.NoError(t, mScope.AddFinalizer(ctx)) + require.Len(t, mScope.LinodeMachine.Finalizers, 1) + assert.Equal(t, mScope.LinodeMachine.Finalizers[0], infrav1alpha1.GroupVersion.String()) + })), + ), + OneOf( + Path( + Call("able to patch", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil) + }), + Result("finalizer added", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ + Client: mck.K8sClient, + Cluster: &clusterv1.Cluster{}, + Machine: &clusterv1.Machine{}, + LinodeCluster: &infrav1alpha1.LinodeCluster{}, + LinodeMachine: &infrav1alpha1.LinodeMachine{}, + }) + require.NoError(t, err) + assert.NoError(t, mScope.AddFinalizer(ctx)) + require.Len(t, mScope.LinodeMachine.Finalizers, 1) + assert.Equal(t, mScope.LinodeMachine.Finalizers[0], infrav1alpha1.GroupVersion.String()) + }), + ), + Path( + Call("unable to patch", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(errors.New("fail")) + }), + Result("error", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ + Client: mck.K8sClient, + Cluster: &clusterv1.Cluster{}, + Machine: &clusterv1.Machine{}, + LinodeCluster: &infrav1alpha1.LinodeCluster{}, + LinodeMachine: &infrav1alpha1.LinodeMachine{}, + }) + require.NoError(t, err) - if mScope.LinodeMachine.Finalizers[0] != infrav1alpha1.GroupVersion.String() { - t.Errorf("Not able to add finalizer") - } - }) - } + 
assert.Error(t, mScope.AddFinalizer(ctx)) + }), + ), + ), + ) } func TestNewMachineScope(t *testing.T) { t.Parallel() - type args struct { - apiKey string - params MachineScopeParams - } - tests := []struct { - name string - args args - want *MachineScope - expectedErr error - expects func(mock *mock.Mockk8sClient) - }{ - { - name: "Success - Pass in valid args and get a valid MachineScope", - args: args{ - apiKey: "test-key", - params: MachineScopeParams{ - Client: nil, + + NewSuite(t, mock.MockK8sClient{}).Run( + OneOf( + Path(Result("invalid params", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{}) + require.ErrorContains(t, err, "is required") + assert.Nil(t, mScope) + })), + Path(Result("no token", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "", MachineScopeParams{ + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, LinodeMachine: &infrav1alpha1.LinodeMachine{}, - }, - }, - expectedErr: nil, - expects: func(mock *mock.Mockk8sClient) { - mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + }) + require.ErrorContains(t, err, "failed to create linode client") + assert.Nil(t, mScope) + })), + Path( + Call("no secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "example")) + }), + Result("error", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "", MachineScopeParams{ + Client: mck.K8sClient, + Cluster: &clusterv1.Cluster{}, + Machine: &clusterv1.Machine{}, + LinodeCluster: &infrav1alpha1.LinodeCluster{}, + LinodeMachine: &infrav1alpha1.LinodeMachine{ + Spec: infrav1alpha1.LinodeMachineSpec{ + CredentialsRef: &corev1.SecretReference{ + Name: "example", + Namespace: "test", + }, + }, + }, + }) + require.ErrorContains(t, err, "credentials from secret 
ref") + assert.Nil(t, mScope) + }), + ), + ), + OneOf( + Path(Call("valid scheme", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) return s }) - }, - }, - { - name: "Success - Pass in credential ref through MachineScopeParams.LinodeMachine and get a valid MachineScope", - args: args{ - apiKey: "test-key", - params: MachineScopeParams{ - Client: nil, + })), + Path( + Call("invalid scheme", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme()) + }), + Result("cannot init patch helper", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ + Client: mck.K8sClient, + Cluster: &clusterv1.Cluster{}, + Machine: &clusterv1.Machine{}, + LinodeCluster: &infrav1alpha1.LinodeCluster{}, + LinodeMachine: &infrav1alpha1.LinodeMachine{}, + }) + require.ErrorContains(t, err, "failed to init patch helper") + assert.Nil(t, mScope) + }), + ), + ), + OneOf( + Path(Call("credentials in secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + *obj = corev1.Secret{ + Data: map[string][]byte{ + "apiToken": []byte("token"), + }, + } + return nil + }) + })), + Path(Result("default credentials", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ + Client: mck.K8sClient, + Cluster: &clusterv1.Cluster{}, + Machine: &clusterv1.Machine{}, + LinodeCluster: &infrav1alpha1.LinodeCluster{}, + LinodeMachine: &infrav1alpha1.LinodeMachine{}, + }) + require.NoError(t, err) + assert.NotNil(t, mScope) + })), + ), + OneOf( + Path(Result("credentials from LinodeMachine credentialsRef", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "", MachineScopeParams{ + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -240,32 +295,13 @@ func TestNewMachineScope(t *testing.T) { }, }, }, - }, - }, - expectedErr: nil, - expects: func(mock *mock.Mockk8sClient) { - mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { - s := runtime.NewScheme() - infrav1alpha1.AddToScheme(s) - return s - }) - mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { - cred := corev1.Secret{ - Data: map[string][]byte{ - "apiToken": []byte("example"), - }, - } - *obj = cred - return nil }) - }, - }, - { - name: "Success - Pass in credential ref through MachineScopeParams.LinodeCluster and get a valid MachineScope", - args: args{ - apiKey: "test-key", - params: MachineScopeParams{ - Client: nil, + require.NoError(t, err) + assert.NotNil(t, mScope) + })), + Path(Result("credentials from LinodeCluster credentialsRef", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ + Client: mck.K8sClient, Cluster: 
&clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{ @@ -277,203 +313,29 @@ func TestNewMachineScope(t *testing.T) { }, }, LinodeMachine: &infrav1alpha1.LinodeMachine{}, - }, - }, - expectedErr: nil, - expects: func(mock *mock.Mockk8sClient) { - mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { - s := runtime.NewScheme() - infrav1alpha1.AddToScheme(s) - return s }) - mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { - cred := corev1.Secret{ - Data: map[string][]byte{ - "apiToken": []byte("example"), - }, - } - *obj = cred - return nil - }) - }, - }, - { - name: "Error - Pass in credential ref through MachineScopeParams.LinodeCluster and getCredentialDataFromRef() returns error", - args: args{ - apiKey: "test-key", - params: MachineScopeParams{ - Client: nil, - Cluster: &clusterv1.Cluster{}, - Machine: &clusterv1.Machine{}, - LinodeCluster: &infrav1alpha1.LinodeCluster{ - Spec: infrav1alpha1.LinodeClusterSpec{ - CredentialsRef: &corev1.SecretReference{ - Name: "example", - Namespace: "test", - }, - }, - }, - LinodeMachine: &infrav1alpha1.LinodeMachine{}, - }, - }, - expectedErr: errors.New("credentials from cluster secret ref: get credentials secret test/example: Creds not found"), - expects: func(mock *mock.Mockk8sClient) { - mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("Creds not found")) - }, - }, - { - name: "Error - Pass in invalid args and get an error. 
Set ClusterScopeParams.Cluster to nil", - args: args{ - apiKey: "test-key", - params: MachineScopeParams{ - Client: nil, - Cluster: nil, - Machine: &clusterv1.Machine{}, - LinodeCluster: &infrav1alpha1.LinodeCluster{}, - LinodeMachine: &infrav1alpha1.LinodeMachine{}, - }, - }, - expectedErr: errors.New("custer is required when creating a MachineScope"), - expects: func(mock *mock.Mockk8sClient) {}, - }, - { - name: "Error - Pass in valid args but couldn't get patch helper", - args: args{ - apiKey: "test-key", - params: MachineScopeParams{ - Client: nil, - Cluster: &clusterv1.Cluster{}, - Machine: &clusterv1.Machine{}, - LinodeCluster: &infrav1alpha1.LinodeCluster{}, - LinodeMachine: &infrav1alpha1.LinodeMachine{}, - }, - }, - expectedErr: errors.New("failed to init patch helper:"), - expects: func(mock *mock.Mockk8sClient) { - mock.EXPECT().Scheme().Return(runtime.NewScheme()) - }, - }, - { - name: "Error - createLinodeClient() returns error for passing empty apiKey", - args: args{ - apiKey: "", - params: MachineScopeParams{ - Client: nil, - Cluster: &clusterv1.Cluster{}, - Machine: &clusterv1.Machine{}, - LinodeCluster: &infrav1alpha1.LinodeCluster{}, - LinodeMachine: &infrav1alpha1.LinodeMachine{}, - }, - }, - expectedErr: errors.New("failed to create linode client: missing Linode API key"), - expects: func(mock *mock.Mockk8sClient) {}, - }, - } - - for _, tt := range tests { - testcase := tt - t.Run(testcase.name, func(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockK8sClient := mock.NewMockk8sClient(ctrl) - - testcase.expects(mockK8sClient) - - testcase.args.params.Client = mockK8sClient - - got, err := NewMachineScope(context.Background(), testcase.args.apiKey, testcase.args.params) - - if testcase.expectedErr != nil { - assert.ErrorContains(t, err, testcase.expectedErr.Error()) - } else { - assert.NotEmpty(t, got) - } - }) - } + require.NoError(t, err) + assert.NotNil(t, mScope) + })), + ), + ) } func 
TestMachineScopeGetBootstrapData(t *testing.T) { t.Parallel() - type fields struct { - Cluster *clusterv1.Cluster - Machine *clusterv1.Machine - LinodeClient *linodego.Client - LinodeCluster *infrav1alpha1.LinodeCluster - LinodeMachine *infrav1alpha1.LinodeMachine - } - tests := []struct { - name string - fields fields - want []byte - expectedErr error - expects func(mock *mock.Mockk8sClient) - }{ - // TODO: Add test cases. - { - name: "Success - Using a valid MachineScope. Get bootstrap data", - fields: fields{ - Cluster: &clusterv1.Cluster{}, - Machine: &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - DataSecretName: ptr.To("test-data"), - }, - }, - }, - LinodeClient: &linodego.Client{}, - LinodeCluster: &infrav1alpha1.LinodeCluster{}, - LinodeMachine: &infrav1alpha1.LinodeMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-linode-machine", - Namespace: "test-namespace", - }, - }, - }, - want: []byte("test-data"), - expectedErr: nil, - expects: func(mock *mock.Mockk8sClient) { - mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { - cred := corev1.Secret{ - Data: map[string][]byte{ - "value": []byte("test-data"), - }, - } - *obj = cred + + NewSuite(t, mock.MockK8sClient{}).Run( + Call("able to get secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{"value": []byte("test-data")}} + *obj = secret return nil }) - }, - }, - { - name: "Error - Set MachineScope.Machine.Spec.Bootstrap.DataSecretName to nil. 
Returns an error", - fields: fields{ - Cluster: &clusterv1.Cluster{}, - Machine: &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - DataSecretName: nil, - }, - }, - }, - LinodeClient: &linodego.Client{}, - LinodeCluster: &infrav1alpha1.LinodeCluster{}, - LinodeMachine: &infrav1alpha1.LinodeMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-linode-machine", - Namespace: "test-namespace", - }, - }, - }, - want: nil, - expectedErr: errors.New("bootstrap data secret is nil for LinodeMachine test-namespace/test-linode-machine"), - expects: func(mock *mock.Mockk8sClient) {}, - }, - { - name: "Error - client.Get return an error while retrieving bootstrap data secret", - fields: fields{ - Cluster: &clusterv1.Cluster{}, + }), + Result("success", func(ctx context.Context, mck Mock) { + mScope := MachineScope{ + Client: mck.K8sClient, Machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -481,25 +343,40 @@ func TestMachineScopeGetBootstrapData(t *testing.T) { }, }, }, - LinodeClient: &linodego.Client{}, - LinodeCluster: &infrav1alpha1.LinodeCluster{}, - LinodeMachine: &infrav1alpha1.LinodeMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-linode-machine", - Namespace: "test-namespace", - }, - }, - }, - want: nil, - expectedErr: errors.New("failed to retrieve bootstrap data secret for LinodeMachine test-namespace/test-linode-machine"), - expects: func(mock *mock.Mockk8sClient) { - mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("test-error")) - }, - }, - { - name: "Error - client.Get return some data but it doesn't contain the bootstrap data secret and secret key 'value' is missing", - fields: fields{ - Cluster: &clusterv1.Cluster{}, + LinodeMachine: &infrav1alpha1.LinodeMachine{}, + } + + data, err := mScope.GetBootstrapData(ctx) + require.NoError(t, err) + assert.Equal(t, data, []byte("test-data")) + }), + OneOf( + Path(Call("unable to get secret", func(ctx 
context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). + Return(apierrors.NewNotFound(schema.GroupResource{}, "test-data")) + })), + Path(Call("secret is missing data", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + *obj = corev1.Secret{} + return nil + }) + })), + Path(Result("secret ref missing", func(ctx context.Context, mck Mock) { + mScope := MachineScope{ + Client: mck.K8sClient, + Machine: &clusterv1.Machine{}, + LinodeMachine: &infrav1alpha1.LinodeMachine{}, + } + + data, err := mScope.GetBootstrapData(ctx) + require.ErrorContains(t, err, "bootstrap data secret is nil") + assert.Empty(t, data) + })), + ), + Result("error", func(ctx context.Context, mck Mock) { + mScope := MachineScope{ + Client: mck.K8sClient, Machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -507,58 +384,12 @@ func TestMachineScopeGetBootstrapData(t *testing.T) { }, }, }, - LinodeClient: &linodego.Client{}, - LinodeCluster: &infrav1alpha1.LinodeCluster{}, - LinodeMachine: &infrav1alpha1.LinodeMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-linode-machine", - Namespace: "test-namespace", - }, - }, - }, - want: nil, - expectedErr: errors.New("bootstrap data secret value key is missing for LinodeMachine test-namespace/test-linode-machine"), - expects: func(mock *mock.Mockk8sClient) { - mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { - cred := corev1.Secret{ - Data: map[string][]byte{}, - } - *obj = cred - return nil - }, - ) - }, - }, - } - for _, tt := range tests { - testcase := tt - t.Run(testcase.name, func(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - 
mockK8sClient := mock.NewMockk8sClient(ctrl) - testcase.expects(mockK8sClient) - - mScope := &MachineScope{ - Client: mockK8sClient, - PatchHelper: &patch.Helper{}, // empty patch helper - Cluster: testcase.fields.Cluster, - Machine: testcase.fields.Machine, - LinodeClient: testcase.fields.LinodeClient, - LinodeCluster: testcase.fields.LinodeCluster, - LinodeMachine: testcase.fields.LinodeMachine, + LinodeMachine: &infrav1alpha1.LinodeMachine{}, } - got, err := mScope.GetBootstrapData(context.Background()) - - if testcase.expectedErr != nil { - assert.EqualError(t, err, testcase.expectedErr.Error()) - } else { - assert.Equal(t, testcase.want, got) - } - }) - } + data, err := mScope.GetBootstrapData(ctx) + require.Error(t, err) + assert.Empty(t, data) + }), + ) } diff --git a/cloud/scope/object_storage_bucket.go b/cloud/scope/object_storage_bucket.go index 51f48e97e..81eb74f8a 100644 --- a/cloud/scope/object_storage_bucket.go +++ b/cloud/scope/object_storage_bucket.go @@ -32,14 +32,13 @@ stringData: secret_key_ro: %s` type ObjectStorageBucketScopeParams struct { - Client k8sClient - LinodeClientBuilder LinodeObjectStorageClientBuilder - Bucket *infrav1alpha1.LinodeObjectStorageBucket - Logger *logr.Logger + Client K8sClient + Bucket *infrav1alpha1.LinodeObjectStorageBucket + Logger *logr.Logger } type ObjectStorageBucketScope struct { - Client k8sClient + Client K8sClient Bucket *infrav1alpha1.LinodeObjectStorageBucket Logger logr.Logger LinodeClient LinodeObjectStorageClient @@ -56,9 +55,6 @@ func validateObjectStorageBucketScopeParams(params ObjectStorageBucketScopeParam if params.Logger == nil { return errors.New("logger is required when creating an ObjectStorageBucketScope") } - if params.LinodeClientBuilder == nil { - return errors.New("LinodeClientBuilder is required when creating an ObjectStorageBucketScope") - } return nil } @@ -76,7 +72,7 @@ func NewObjectStorageBucketScope(ctx context.Context, apiKey string, params Obje } apiKey = string(data) } - 
linodeClient, err := params.LinodeClientBuilder(apiKey) + linodeClient, err := CreateLinodeClient(apiKey) if err != nil { return nil, fmt.Errorf("failed to create linode client: %w", err) } diff --git a/cloud/scope/object_storage_bucket_test.go b/cloud/scope/object_storage_bucket_test.go index e36f82e9e..13f08929e 100644 --- a/cloud/scope/object_storage_bucket_test.go +++ b/cloud/scope/object_storage_bucket_test.go @@ -36,18 +36,16 @@ func TestValidateObjectStorageBucketScopeParams(t *testing.T) { { name: "Success - Valid ObjectStorageBucketScopeParams", params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: CreateLinodeObjectStorageClient, - Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, - Logger: &logr.Logger{}, + Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, + Logger: &logr.Logger{}, }, expectedErr: nil, }, { name: "Failure - Invalid ObjectStorageBucketScopeParams. Logger is nil", params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: CreateLinodeObjectStorageClient, - Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, - Logger: nil, + Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, + Logger: nil, }, expectedErr: fmt.Errorf("logger is required when creating an ObjectStorageBucketScope"), }, @@ -55,21 +53,11 @@ func TestValidateObjectStorageBucketScopeParams(t *testing.T) { { name: "Failure - Invalid ObjectStorageBucketScopeParams. Bucket is nil", params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: CreateLinodeObjectStorageClient, - Bucket: nil, - Logger: &logr.Logger{}, + Bucket: nil, + Logger: &logr.Logger{}, }, expectedErr: fmt.Errorf("object storage bucket is required when creating an ObjectStorageBucketScope"), }, - { - name: "Failure - Invalid ObjectStorageBucketScopeParams. 
LinodeClientBuilder is nil", - params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: nil, - Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, - Logger: &logr.Logger{}, - }, - expectedErr: fmt.Errorf("LinodeClientBuilder is required when creating an ObjectStorageBucketScope"), - }, } for _, tt := range tests { testcase := tt @@ -94,7 +82,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { name string args args expectedErr error - expects func(k8s *mock.Mockk8sClient) + expects func(k8s *mock.MockK8sClient) clientBuildFunc func(apiKey string) (LinodeObjectStorageClient, error) }{ { @@ -102,14 +90,13 @@ func TestNewObjectStorageBucketScope(t *testing.T) { args: args{ apiKey: "apikey", params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: CreateLinodeObjectStorageClient, - Client: nil, - Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, - Logger: &logr.Logger{}, + Client: nil, + Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, + Logger: &logr.Logger{}, }, }, expectedErr: nil, - expects: func(k8s *mock.Mockk8sClient) { + expects: func(k8s *mock.MockK8sClient) { k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -122,8 +109,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { args: args{ apiKey: "apikey", params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: CreateLinodeObjectStorageClient, - Client: nil, + Client: nil, Bucket: &infrav1alpha1.LinodeObjectStorageBucket{ Spec: infrav1alpha1.LinodeObjectStorageBucketSpec{ CredentialsRef: &corev1.SecretReference{ @@ -136,7 +122,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { }, }, expectedErr: nil, - expects: func(k8s *mock.Mockk8sClient) { + expects: func(k8s *mock.MockK8sClient) { k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -160,21 +146,20 @@ func TestNewObjectStorageBucketScope(t *testing.T) { params: 
ObjectStorageBucketScopeParams{}, }, expectedErr: fmt.Errorf("object storage bucket is required when creating an ObjectStorageBucketScope"), - expects: func(k8s *mock.Mockk8sClient) {}, + expects: func(k8s *mock.MockK8sClient) {}, }, { name: "Error - patchHelper returns error. Checking error handle for when new patchHelper is invoked", args: args{ apiKey: "apikey", params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: CreateLinodeObjectStorageClient, - Client: nil, - Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, - Logger: &logr.Logger{}, + Client: nil, + Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, + Logger: &logr.Logger{}, }, }, expectedErr: fmt.Errorf("failed to init patch helper:"), - expects: func(k8s *mock.Mockk8sClient) { + expects: func(k8s *mock.MockK8sClient) { k8s.EXPECT().Scheme().Return(runtime.NewScheme()) }, }, @@ -183,8 +168,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { args: args{ apiKey: "test-key", params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: CreateLinodeObjectStorageClient, - Client: nil, + Client: nil, Bucket: &infrav1alpha1.LinodeObjectStorageBucket{ Spec: infrav1alpha1.LinodeObjectStorageBucketSpec{ CredentialsRef: &corev1.SecretReference{ @@ -197,7 +181,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { }, }, expectedErr: fmt.Errorf("credentials from cluster secret ref: get credentials secret test/example: failed to get secret"), - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("failed to get secret")) }, }, @@ -206,14 +190,13 @@ func TestNewObjectStorageBucketScope(t *testing.T) { args: args{ apiKey: "", params: ObjectStorageBucketScopeParams{ - LinodeClientBuilder: CreateLinodeObjectStorageClient, - Client: nil, - Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, - Logger: &logr.Logger{}, + Client: nil, + Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, + Logger: 
&logr.Logger{}, }, }, expectedErr: fmt.Errorf("failed to create linode client: missing Linode API key"), - expects: func(mock *mock.Mockk8sClient) {}, + expects: func(mock *mock.MockK8sClient) {}, }, } for _, tt := range tests { @@ -224,7 +207,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockK8sClient := mock.NewMockk8sClient(ctrl) + mockK8sClient := mock.NewMockK8sClient(ctrl) testcase.expects(mockK8sClient) @@ -246,12 +229,12 @@ func TestObjectStorageBucketScopeMethods(t *testing.T) { tests := []struct { name string Bucket *infrav1alpha1.LinodeObjectStorageBucket - expects func(mock *mock.Mockk8sClient) + expects func(mock *mock.MockK8sClient) }{ { name: "Success - finalizer should be added to the Linode Object Storage Bucket object", Bucket: &infrav1alpha1.LinodeObjectStorageBucket{}, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -267,7 +250,7 @@ func TestObjectStorageBucketScopeMethods(t *testing.T) { Finalizers: []string{infrav1alpha1.GroupVersion.String()}, }, }, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -284,7 +267,7 @@ func TestObjectStorageBucketScopeMethods(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockK8sClient := mock.NewMockk8sClient(ctrl) + mockK8sClient := mock.NewMockK8sClient(ctrl) testcase.expects(mockK8sClient) @@ -292,10 +275,9 @@ func TestObjectStorageBucketScopeMethods(t *testing.T) { context.Background(), "test-key", ObjectStorageBucketScopeParams{ - Client: mockK8sClient, - Bucket: testcase.Bucket, - Logger: &logr.Logger{}, - LinodeClientBuilder: CreateLinodeObjectStorageClient, + Client: mockK8sClient, + Bucket: testcase.Bucket, + 
Logger: &logr.Logger{}, }) if err != nil { t.Errorf("NewObjectStorageBucketScope() error = %v", err) @@ -319,7 +301,7 @@ func TestGenerateKeySecret(t *testing.T) { Bucket *infrav1alpha1.LinodeObjectStorageBucket keys [NumAccessKeys]*linodego.ObjectStorageKey expectedErr error - expects func(mock *mock.Mockk8sClient) + expects func(mock *mock.MockK8sClient) }{ { name: "happy path", @@ -362,7 +344,7 @@ func TestGenerateKeySecret(t *testing.T) { }, }, }, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -438,7 +420,7 @@ func TestGenerateKeySecret(t *testing.T) { }, }, }, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().Return(runtime.NewScheme()) }, expectedErr: fmt.Errorf("could not set owner ref on access key secret"), @@ -449,12 +431,12 @@ func TestGenerateKeySecret(t *testing.T) { t.Run(testcase.name, func(t *testing.T) { t.Parallel() - var mockClient *mock.Mockk8sClient + var mockClient *mock.MockK8sClient if testcase.expects != nil { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockClient = mock.NewMockk8sClient(ctrl) + mockClient = mock.NewMockK8sClient(ctrl) testcase.expects(mockClient) } @@ -564,7 +546,7 @@ func TestShouldRestoreKeySecret(t *testing.T) { tests := []struct { name string bucket *infrav1alpha1.LinodeObjectStorageBucket - expects func(k8s *mock.Mockk8sClient) + expects func(k8s *mock.MockK8sClient) want bool expectedErr error }{ @@ -588,7 +570,7 @@ func TestShouldRestoreKeySecret(t *testing.T) { KeySecretName: ptr.To("secret"), }, }, - expects: func(k8s *mock.Mockk8sClient) { + expects: func(k8s *mock.MockK8sClient) { k8s.EXPECT(). Get(gomock.Any(), client.ObjectKey{Namespace: "ns", Name: "secret"}, gomock.Any()). 
Return(nil) @@ -606,7 +588,7 @@ func TestShouldRestoreKeySecret(t *testing.T) { KeySecretName: ptr.To("secret"), }, }, - expects: func(k8s *mock.Mockk8sClient) { + expects: func(k8s *mock.MockK8sClient) { k8s.EXPECT(). Get(gomock.Any(), client.ObjectKey{Namespace: "ns", Name: "secret"}, gomock.Any()). Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "secret")) @@ -624,7 +606,7 @@ func TestShouldRestoreKeySecret(t *testing.T) { KeySecretName: ptr.To("secret"), }, }, - expects: func(k8s *mock.Mockk8sClient) { + expects: func(k8s *mock.MockK8sClient) { k8s.EXPECT(). Get(gomock.Any(), client.ObjectKey{Namespace: "ns", Name: "secret"}, gomock.Any()). Return(errors.New("unexpected error")) @@ -637,12 +619,12 @@ func TestShouldRestoreKeySecret(t *testing.T) { t.Run(testcase.name, func(t *testing.T) { t.Parallel() - var mockClient *mock.Mockk8sClient + var mockClient *mock.MockK8sClient if testcase.expects != nil { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockClient = mock.NewMockk8sClient(ctrl) + mockClient = mock.NewMockK8sClient(ctrl) testcase.expects(mockClient) } diff --git a/cloud/scope/vpc.go b/cloud/scope/vpc.go index 5cc3b8eeb..1225978fa 100644 --- a/cloud/scope/vpc.go +++ b/cloud/scope/vpc.go @@ -30,7 +30,7 @@ import ( // VPCScope defines the basic context for an actuator to operate upon. type VPCScope struct { - client k8sClient + client K8sClient PatchHelper *patch.Helper LinodeClient *linodego.Client @@ -39,7 +39,7 @@ type VPCScope struct { // VPCScopeParams defines the input parameters used to create a new Scope. 
type VPCScopeParams struct { - Client k8sClient + Client K8sClient LinodeVPC *infrav1alpha1.LinodeVPC } diff --git a/cloud/scope/vpc_test.go b/cloud/scope/vpc_test.go index 1110f6b4d..b1bfdc1ef 100644 --- a/cloud/scope/vpc_test.go +++ b/cloud/scope/vpc_test.go @@ -75,7 +75,7 @@ func TestNewVPCScope(t *testing.T) { args args want *VPCScope expectedError error - expects func(m *mock.Mockk8sClient) + expects func(m *mock.MockK8sClient) }{ { name: "Success - Pass in valid args and get a valid VPCScope", @@ -86,7 +86,7 @@ func TestNewVPCScope(t *testing.T) { }, }, expectedError: nil, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -110,7 +110,7 @@ func TestNewVPCScope(t *testing.T) { }, }, expectedError: nil, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -133,7 +133,7 @@ func TestNewVPCScope(t *testing.T) { apiKey: "test-key", params: VPCScopeParams{}, }, - expects: func(mock *mock.Mockk8sClient) {}, + expects: func(mock *mock.MockK8sClient) {}, expectedError: fmt.Errorf("linodeVPC is required when creating a VPCScope"), }, { @@ -151,7 +151,7 @@ func TestNewVPCScope(t *testing.T) { }, }, }, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("test error")) }, expectedError: fmt.Errorf("credentials from secret ref: get credentials secret test-namespace/test-name: test error"), @@ -164,7 +164,7 @@ func TestNewVPCScope(t *testing.T) { LinodeVPC: &infrav1alpha1.LinodeVPC{}, }, }, - expects: func(mock *mock.Mockk8sClient) {}, + expects: func(mock *mock.MockK8sClient) {}, expectedError: fmt.Errorf("failed to create linode client: missing Linode API 
key"), }, { @@ -176,7 +176,7 @@ func TestNewVPCScope(t *testing.T) { }, }, expectedError: fmt.Errorf("failed to init patch helper:"), - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().Return(runtime.NewScheme()) }, }, @@ -188,7 +188,7 @@ func TestNewVPCScope(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockK8sClient := mock.NewMockk8sClient(ctrl) + mockK8sClient := mock.NewMockK8sClient(ctrl) testcase.expects(mockK8sClient) @@ -210,7 +210,7 @@ func TestVPCScopeMethods(t *testing.T) { tests := []struct { name string LinodeVPC *infrav1alpha1.LinodeVPC - expects func(mock *mock.Mockk8sClient) + expects func(mock *mock.MockK8sClient) }{ { name: "Success - finalizer should be added to the Linode VPC object", @@ -219,7 +219,7 @@ func TestVPCScopeMethods(t *testing.T) { Name: "test-vpc", }, }, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -236,7 +236,7 @@ func TestVPCScopeMethods(t *testing.T) { Finalizers: []string{infrav1alpha1.GroupVersion.String()}, }, }, - expects: func(mock *mock.Mockk8sClient) { + expects: func(mock *mock.MockK8sClient) { mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) @@ -253,7 +253,7 @@ func TestVPCScopeMethods(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockK8sClient := mock.NewMockk8sClient(ctrl) + mockK8sClient := mock.NewMockK8sClient(ctrl) testcase.expects(mockK8sClient) diff --git a/cmd/main.go b/cmd/main.go index ceab65f86..2131b43b2 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -32,7 +32,6 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" infrastructurev1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" - 
"github.com/linode/cluster-api-provider-linode/cloud/scope" controller2 "github.com/linode/cluster-api-provider-linode/controller" "github.com/linode/cluster-api-provider-linode/version" @@ -141,12 +140,11 @@ func main() { os.Exit(1) } if err = (&controller2.LinodeObjectStorageBucketReconciler{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("LinodeObjectStorageBucketReconciler"), - Recorder: mgr.GetEventRecorderFor("LinodeObjectStorageBucketReconciler"), - WatchFilterValue: objectStorageBucketWatchFilter, - LinodeApiKey: linodeToken, - LinodeClientBuilder: scope.CreateLinodeObjectStorageClient, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("LinodeObjectStorageBucketReconciler"), + Recorder: mgr.GetEventRecorderFor("LinodeObjectStorageBucketReconciler"), + WatchFilterValue: objectStorageBucketWatchFilter, + LinodeApiKey: linodeToken, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "LinodeObjectStorageBucket") os.Exit(1) diff --git a/controller/linodemachine_controller_helpers_test.go b/controller/linodemachine_controller_helpers_test.go index eae9b5d98..2f88f001b 100644 --- a/controller/linodemachine_controller_helpers_test.go +++ b/controller/linodemachine_controller_helpers_test.go @@ -86,7 +86,7 @@ func TestSetUserData(t *testing.T) { createConfig *linodego.InstanceCreateOptions wantConfig *linodego.InstanceCreateOptions expectedError error - expects func(client *mock.MockLinodeMachineClient, kClient *mock.Mockk8sClient) + expects func(client *mock.MockLinodeMachineClient, kClient *mock.MockK8sClient) }{ { name: "Success - SetUserData metadata", @@ -110,7 +110,7 @@ func TestSetUserData(t *testing.T) { wantConfig: &linodego.InstanceCreateOptions{Metadata: &linodego.InstanceMetadataOptions{ UserData: b64.StdEncoding.EncodeToString([]byte("test-data")), }}, - expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.Mockk8sClient) { + expects: func(mockClient 
*mock.MockLinodeMachineClient, kMock *mock.MockK8sClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -151,7 +151,7 @@ func TestSetUserData(t *testing.T) { "instancedata": b64.StdEncoding.EncodeToString([]byte("label: test-cluster\nregion: us-east\ntype: g6-standard-1")), "userdata": b64.StdEncoding.EncodeToString([]byte("test-data")), }}, - expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.Mockk8sClient) { + expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.MockK8sClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -191,7 +191,7 @@ func TestSetUserData(t *testing.T) { }}, createConfig: &linodego.InstanceCreateOptions{}, wantConfig: &linodego.InstanceCreateOptions{}, - expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.Mockk8sClient) { + expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.MockK8sClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -225,7 +225,7 @@ func TestSetUserData(t *testing.T) { }}, createConfig: &linodego.InstanceCreateOptions{}, wantConfig: &linodego.InstanceCreateOptions{}, - expects: func(c *mock.MockLinodeMachineClient, k *mock.Mockk8sClient) { + expects: func(c *mock.MockLinodeMachineClient, k *mock.MockK8sClient) { }, expectedError: fmt.Errorf("bootstrap data secret is nil for LinodeMachine default/test-cluster"), }, @@ -249,7 +249,7 @@ func TestSetUserData(t *testing.T) { }}, createConfig: 
&linodego.InstanceCreateOptions{}, wantConfig: &linodego.InstanceCreateOptions{}, - expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.Mockk8sClient) { + expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.MockK8sClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -283,7 +283,7 @@ func TestSetUserData(t *testing.T) { }}, createConfig: &linodego.InstanceCreateOptions{}, wantConfig: &linodego.InstanceCreateOptions{}, - expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.Mockk8sClient) { + expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.MockK8sClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -323,7 +323,7 @@ func TestSetUserData(t *testing.T) { "instancedata": b64.StdEncoding.EncodeToString([]byte("label: test-cluster\nregion: us-east\ntype: g6-standard-1")), "userdata": b64.StdEncoding.EncodeToString([]byte("test-data")), }}, - expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.Mockk8sClient) { + expects: func(mockClient *mock.MockLinodeMachineClient, kMock *mock.MockK8sClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -351,7 +351,7 @@ func TestSetUserData(t *testing.T) { defer ctrl.Finish() mockClient := mock.NewMockLinodeMachineClient(ctrl) - mockK8sClient := mock.NewMockk8sClient(ctrl) + mockK8sClient := mock.NewMockK8sClient(ctrl) testcase.machineScope.LinodeClient = mockClient testcase.machineScope.Client = 
mockK8sClient testcase.expects(mockClient, mockK8sClient) diff --git a/controller/linodeobjectstoragebucket_controller.go b/controller/linodeobjectstoragebucket_controller.go index faff07e7e..bb56739e1 100644 --- a/controller/linodeobjectstoragebucket_controller.go +++ b/controller/linodeobjectstoragebucket_controller.go @@ -50,12 +50,11 @@ import ( // LinodeObjectStorageBucketReconciler reconciles a LinodeObjectStorageBucket object type LinodeObjectStorageBucketReconciler struct { client.Client - Logger logr.Logger - Recorder record.EventRecorder - LinodeApiKey string - LinodeClientBuilder scope.LinodeObjectStorageClientBuilder - WatchFilterValue string - ReconcileTimeout time.Duration + Logger logr.Logger + Recorder record.EventRecorder + LinodeApiKey string + WatchFilterValue string + ReconcileTimeout time.Duration } // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeobjectstoragebuckets,verbs=get;list;watch;create;update;patch;delete @@ -93,10 +92,9 @@ func (r *LinodeObjectStorageBucketReconciler) Reconcile(ctx context.Context, req ctx, r.LinodeApiKey, scope.ObjectStorageBucketScopeParams{ - Client: r.Client, - LinodeClientBuilder: r.LinodeClientBuilder, - Bucket: objectStorageBucket, - Logger: &logger, + Client: r.Client, + Bucket: objectStorageBucket, + Logger: &logger, }, ) if err != nil { diff --git a/controller/linodeobjectstoragebucket_controller_test.go b/controller/linodeobjectstoragebucket_controller_test.go index 4c6e90285..2057e9369 100644 --- a/controller/linodeobjectstoragebucket_controller_test.go +++ b/controller/linodeobjectstoragebucket_controller_test.go @@ -17,7 +17,7 @@ package controller import ( - "bytes" + "context" "errors" "fmt" "time" @@ -30,12 +30,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/patch" 
"sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/yaml" @@ -44,11 +42,12 @@ import ( "github.com/linode/cluster-api-provider-linode/mock" "github.com/linode/cluster-api-provider-linode/util" + . "github.com/linode/cluster-api-provider-linode/mock/mocktest" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -type AccessKeySecret struct { +type accessKeySecret struct { APIVersion string `json:"apiVersion"` Kind string `json:"kind"` Metadata struct { @@ -66,16 +65,8 @@ type AccessKeySecret struct { } `json:"stringData"` } -func mockLinodeClientBuilder(m *mock.MockLinodeObjectStorageClient) scope.LinodeObjectStorageClientBuilder { - return func(_ string) (scope.LinodeObjectStorageClient, error) { - return m, nil - } -} - var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { - var mockCtrl *gomock.Controller - var reconciler *LinodeObjectStorageBucketReconciler - var testLogs *bytes.Buffer + suite := NewControllerSuite(GinkgoT(), mock.MockLinodeObjectStorageClient{}) obj := infrav1.LinodeObjectStorageBucket{ ObjectMeta: metav1.ObjectMeta{ @@ -87,350 +78,303 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { }, } - secret := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(scope.AccessKeyNameTemplate, obj.Name), - Namespace: "default", - }, + bScope := scope.ObjectStorageBucketScope{ + Bucket: &obj, } - // Create a recorder with a buffered channel for consuming event strings. 
- recorder := record.NewFakeRecorder(10) - - BeforeEach(func() { - // Create a new gomock controller for each test run - mockCtrl = gomock.NewController(GinkgoT()) - // Inject io.Writer as log sink for consuming logs - testLogs = &bytes.Buffer{} - reconciler = &LinodeObjectStorageBucketReconciler{ - Client: k8sClient, - Recorder: recorder, - Logger: zap.New( - zap.WriteTo(GinkgoWriter), - zap.WriteTo(testLogs), - zap.UseDevMode(true), - ), - } - }) - - AfterEach(func() { - // At the end of each test run, tell the gomock controller it's done - // so it can check configured expectations and validate the methods called - mockCtrl.Finish() - // Flush the channel if any events were not consumed. - for len(recorder.Events) > 0 { - <-recorder.Events - } - }) - - It("should provision a bucket and keys", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - - getCall := mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()). - Return(nil, nil). - Times(1) - - createBucketCall := mockLinodeClient.EXPECT(). - CreateObjectStorageBucket(gomock.Any(), gomock.Any()). - Return(&linodego.ObjectStorageBucket{ - Label: obj.Name, - Cluster: obj.Spec.Cluster, - Created: util.Pointer(time.Now()), - Hostname: "hostname", - }, nil). - Times(1). - After(getCall) - - for idx := range 2 { - mockLinodeClient.EXPECT(). - CreateObjectStorageKey(gomock.Any(), gomock.Any()). - DoAndReturn( - func(_ any, opts linodego.ObjectStorageKeyCreateOptions) (*linodego.ObjectStorageKey, error) { - return &linodego.ObjectStorageKey{ID: idx, AccessKey: fmt.Sprintf("key-%d", idx)}, nil - }). - Times(1). 
- After(createBucketCall) - } + reconciler := LinodeObjectStorageBucketReconciler{} - objectKey := client.ObjectKeyFromObject(&obj) + BeforeAll(func(ctx SpecContext) { + bScope.Client = k8sClient Expect(k8sClient.Create(ctx, &obj)).To(Succeed()) - - reconciler.LinodeClientBuilder = mockLinodeClientBuilder(mockLinodeClient) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: objectKey, - }) - Expect(err).NotTo(HaveOccurred()) - - By("updating the bucket resource's status fields") - Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) - Expect(obj.Status.Ready).To(BeTrue()) - Expect(obj.Status.Conditions).To(HaveLen(1)) - Expect(obj.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) - Expect(*obj.Status.Hostname).To(Equal("hostname")) - Expect(obj.Status.CreationTime).NotTo(BeNil()) - Expect(*obj.Status.LastKeyGeneration).To(Equal(*obj.Spec.KeyGeneration)) - Expect(*obj.Status.LastKeyGeneration).To(Equal(0)) - Expect(*obj.Status.KeySecretName).To(Equal(secret.Name)) - Expect(obj.Status.AccessKeyRefs).To(HaveLen(scope.NumAccessKeys)) - - By("creating a secret with access keys") - Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(&secret), &secret)).To(Succeed()) - Expect(secret.Data).To(HaveLen(1)) - var key AccessKeySecret - unMarshallingErr := yaml.Unmarshal(secret.Data["bucket-details-secret.yaml"], &key) - Expect(unMarshallingErr).NotTo(HaveOccurred()) - Expect(key.StringData.BucketName).To(Equal("lifecycle")) - Expect(key.StringData.BucketRegion).To(Equal("cluster")) - Expect(key.StringData.BucketEndpoint).To(Equal("hostname")) - Expect(key.StringData.AccessKeyRW).To(Equal("key-0")) - Expect(key.StringData.SecretKeyRW).To(Equal("")) - Expect(key.StringData.AccessKeyRO).To(Equal("key-1")) - Expect(key.StringData.SecretKeyRO).To(Equal("")) - - By("recording the expected events") - Expect(<-recorder.Events).To(ContainSubstring("Object storage keys assigned")) - Expect(<-recorder.Events).To(ContainSubstring("Object storage 
keys stored in secret")) - Expect(<-recorder.Events).To(ContainSubstring("Object storage bucket synced")) - - By("logging the expected messages") - logOutput := testLogs.String() - Expect(logOutput).To(ContainSubstring("Reconciling apply")) - Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) }) - It("should ensure the bucket's secret exists", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - - getCall := mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()). - Return(&linodego.ObjectStorageBucket{ - Label: obj.Name, - Cluster: obj.Spec.Cluster, - Created: util.Pointer(time.Now()), - Hostname: "hostname", - }, nil). - Times(1) - - for idx := range 2 { - mockLinodeClient.EXPECT(). - GetObjectStorageKey(gomock.Any(), idx). - Return(&linodego.ObjectStorageKey{ - ID: idx, - AccessKey: fmt.Sprintf("key-%d", idx), - }, nil). - Times(1). - After(getCall) - } - - objectKey := client.ObjectKeyFromObject(&obj) - Expect(k8sClient.Delete(ctx, &secret)).To(Succeed()) - - reconciler.LinodeClientBuilder = mockLinodeClientBuilder(mockLinodeClient) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: objectKey, - }) - Expect(err).NotTo(HaveOccurred()) - - By("re-creating it when it is deleted") - Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(&secret), &secret)).To(Succeed()) - Expect(secret.Data).To(HaveLen(1)) - var key AccessKeySecret - unMarshallingErr := yaml.Unmarshal(secret.Data["bucket-details-secret.yaml"], &key) - Expect(unMarshallingErr).NotTo(HaveOccurred()) - Expect(key.StringData.BucketName).To(Equal("lifecycle")) - Expect(key.StringData.BucketRegion).To(Equal("cluster")) - Expect(key.StringData.BucketEndpoint).To(Equal("hostname")) - Expect(key.StringData.AccessKeyRW).To(Equal("key-0")) - Expect(key.StringData.SecretKeyRW).To(Equal("")) - Expect(key.StringData.AccessKeyRO).To(Equal("key-1")) - 
Expect(key.StringData.SecretKeyRO).To(Equal("")) - - By("recording the expected events") - Expect(<-recorder.Events).To(ContainSubstring("Object storage keys retrieved")) - Expect(<-recorder.Events).To(ContainSubstring("Object storage keys stored in secret")) - Expect(<-recorder.Events).To(ContainSubstring("Object storage bucket synced")) - - By("logging the expected messages") - logOutput := testLogs.String() - Expect(logOutput).To(ContainSubstring("Reconciling apply")) - Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) - }) + suite.BeforeEach(func(ctx context.Context, mck Mock) { + reconciler.Recorder = mck.Recorder() + bScope.Logger = mck.Logger() - It("should rotate the bucket's keys", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - - getCall := mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()). - Return(&linodego.ObjectStorageBucket{ - Label: obj.Name, - Cluster: obj.Spec.Cluster, - Created: util.Pointer(time.Now()), - Hostname: "hostname", - }, nil). - Times(1) - - for idx := range 2 { - createCall := mockLinodeClient.EXPECT(). - CreateObjectStorageKey(gomock.Any(), gomock.Any()). - After(getCall). - DoAndReturn( - func(_ any, opts linodego.ObjectStorageKeyCreateOptions) (*linodego.ObjectStorageKey, error) { - return &linodego.ObjectStorageKey{ID: idx + 2, AccessKey: fmt.Sprintf("key-%d", idx+2)}, nil - }). - Times(1) - mockLinodeClient.EXPECT(). - DeleteObjectStorageKey(gomock.Any(), idx). - After(createCall). - Return(nil). 
- Times(1) - } - - objectKey := client.ObjectKeyFromObject(&obj) + objectKey := client.ObjectKey{Name: "lifecycle", Namespace: "default"} Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) - obj.Spec.KeyGeneration = ptr.To(1) - Expect(k8sClient.Update(ctx, &obj)).To(Succeed()) - reconciler.LinodeClientBuilder = mockLinodeClientBuilder(mockLinodeClient) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: objectKey, - }) + // Create patch helper with latest state of resource. + // This is only needed when relying on envtest's k8sClient. + patchHelper, err := patch.NewHelper(&obj, k8sClient) Expect(err).NotTo(HaveOccurred()) - - By("updating the bucket resource's status fields") - Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) - Expect(*obj.Status.LastKeyGeneration).To(Equal(1)) - - By("recording the expected event") - Expect(<-recorder.Events).To(ContainSubstring("Object storage keys assigned")) - - By("logging the expected messages") - logOutput := testLogs.String() - Expect(logOutput).To(ContainSubstring("Reconciling apply")) - Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) + bScope.PatchHelper = patchHelper }) - It("should revoke the bucket's keys", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - - for i := range 2 { - mockLinodeClient.EXPECT(). - DeleteObjectStorageKey(gomock.Any(), i+2). - Return(nil). 
- Times(1) - } - - objectKey := client.ObjectKeyFromObject(&obj) - Expect(k8sClient.Delete(ctx, &obj)).To(Succeed()) - - reconciler.LinodeClientBuilder = mockLinodeClientBuilder(mockLinodeClient) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: objectKey, - }) - Expect(err).NotTo(HaveOccurred()) - - By("removing the bucket's finalizer so it is deleted") - Expect(apierrors.IsNotFound(k8sClient.Get(ctx, objectKey, &obj))).To(BeTrue()) - - By("recording the expected event") - Expect(<-recorder.Events).To(ContainSubstring("Object storage keys revoked")) - - By("logging the expected messages") - logOutput := testLogs.String() - Expect(logOutput).To(ContainSubstring("Reconciling delete")) - }) -}) - -var _ = Describe("pre-reconcile", Label("bucket", "pre-reconcile"), func() { - var obj infrav1.LinodeObjectStorageBucket - var mockCtrl *gomock.Controller - var reconciler *LinodeObjectStorageBucketReconciler - var testLogs *bytes.Buffer - - recorder := record.NewFakeRecorder(10) - - BeforeEach(func() { - // Use a generated name to isolate objects per spec. - obj = infrav1.LinodeObjectStorageBucket{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "pre-reconcile-", - Namespace: "default", - }, - Spec: infrav1.LinodeObjectStorageBucketSpec{ - Cluster: "cluster", - }, - } - mockCtrl = gomock.NewController(GinkgoT()) - testLogs = &bytes.Buffer{} - reconciler = &LinodeObjectStorageBucketReconciler{ - Client: k8sClient, - Recorder: recorder, - Logger: zap.New( - zap.WriteTo(GinkgoWriter), - zap.WriteTo(testLogs), - zap.UseDevMode(true), + suite.Run( + OneOf( + Path(Call("bucket is created", func(ctx context.Context, mck Mock) { + getBucket := mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, nil) + mck.ObjectStorageClient.EXPECT().CreateObjectStorageBucket(gomock.Any(), gomock.Any()). + After(getBucket). 
+ Return(&linodego.ObjectStorageBucket{ + Label: obj.Name, + Cluster: obj.Spec.Cluster, + Created: util.Pointer(time.Now()), + Hostname: "hostname", + }, nil) + })), + Path( + Call("bucket is not created", func(ctx context.Context, mck Mock) { + getBucket := mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, nil) + mck.ObjectStorageClient.EXPECT().CreateObjectStorageBucket(gomock.Any(), gomock.Any()).After(getBucket).Return(nil, errors.New("create bucket error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("create bucket error")) + }), ), - } - }) - - AfterEach(func() { - mockCtrl.Finish() - for len(recorder.Events) > 0 { - <-recorder.Events - } - }) - - It("returns a nil error when the resource does not exist", func(ctx SpecContext) { - obj.Name = "empty" - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(&obj), - }) - Expect(err).To(BeNil()) - }) + ), + OneOf( + Path(Call("keys are created", func(ctx context.Context, mck Mock) { + for idx := range 2 { + mck.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()). 
+ Return(&linodego.ObjectStorageKey{ + ID: idx, + AccessKey: fmt.Sprintf("access-key-%d", idx), + SecretKey: fmt.Sprintf("secret-key-%d", idx), + }, nil) + } + })), + Path( + Call("keys are not created", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("create key error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("create key error")) + }), + ), + ), + Result("resource status is updated and key secret is created", func(ctx context.Context, mck Mock) { + objectKey := client.ObjectKeyFromObject(&obj) + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err).NotTo(HaveOccurred()) + + By("status") + Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) + Expect(obj.Status.Ready).To(BeTrue()) + Expect(obj.Status.Conditions).To(HaveLen(1)) + Expect(obj.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + Expect(*obj.Status.Hostname).To(Equal("hostname")) + Expect(obj.Status.CreationTime).NotTo(BeNil()) + Expect(*obj.Status.LastKeyGeneration).To(Equal(*obj.Spec.KeyGeneration)) + Expect(*obj.Status.LastKeyGeneration).To(Equal(0)) + Expect(*obj.Status.KeySecretName).To(Equal(fmt.Sprintf(scope.AccessKeyNameTemplate, "lifecycle"))) + Expect(obj.Status.AccessKeyRefs).To(HaveLen(scope.NumAccessKeys)) + + By("secret") + var secret corev1.Secret + secretKey := client.ObjectKey{Namespace: "default", Name: *obj.Status.KeySecretName} + Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) + Expect(secret.Data).To(HaveLen(1)) + + var key accessKeySecret + Expect(yaml.Unmarshal(secret.Data["bucket-details-secret.yaml"], &key)).NotTo(HaveOccurred()) + Expect(key.StringData.BucketName).To(Equal("lifecycle")) + 
Expect(key.StringData.BucketRegion).To(Equal("cluster")) + Expect(key.StringData.BucketEndpoint).To(Equal("hostname")) + Expect(key.StringData.AccessKeyRW).To(Equal("access-key-0")) + Expect(key.StringData.SecretKeyRW).To(Equal("secret-key-0")) + Expect(key.StringData.AccessKeyRO).To(Equal("access-key-1")) + Expect(key.StringData.SecretKeyRO).To(Equal("secret-key-1")) + + events := mck.Events() + Expect(events).To(ContainSubstring("Object storage keys assigned")) + Expect(events).To(ContainSubstring("Object storage keys stored in secret")) + Expect(events).To(ContainSubstring("Object storage bucket synced")) + + logOutput := mck.Logs() + Expect(logOutput).To(ContainSubstring("Reconciling apply")) + Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) + }), + OneOf( + Path(Call("bucket is retrieved on update", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()). 
+ Return(&linodego.ObjectStorageBucket{ + Label: obj.Name, + Cluster: obj.Spec.Cluster, + Created: util.Pointer(time.Now()), + Hostname: "hostname", + }, nil) + })), + Path( + Call("bucket is not retrieved on update", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, errors.New("get bucket error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("get bucket error")) + }), + ), + ), + Once("resource keyGeneration is modified", func(ctx context.Context, _ Mock) { + objectKey := client.ObjectKeyFromObject(&obj) + Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) + obj.Spec.KeyGeneration = ptr.To(1) + Expect(k8sClient.Update(ctx, &obj)).To(Succeed()) + }), + OneOf( + // nb: Order matters for paths of the same length. The leftmost path is evaluated first. + // If we evaluate the happy path first, the bucket resource is mutated so the error path won't occur. + Path( + Call("keys are not rotated", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("create key error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("create key error")) + }), + ), + Path( + Call("keys are rotated", func(ctx context.Context, mck Mock) { + for idx := range 2 { + createCall := mck.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()). 
+ Return(&linodego.ObjectStorageKey{ + ID: idx + 2, + AccessKey: fmt.Sprintf("access-key-%d", idx+2), + SecretKey: fmt.Sprintf("secret-key-%d", idx+2), + }, nil) + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), idx).After(createCall).Return(nil) + } + }), + Result("resource lastKeyGeneration is updated", func(ctx context.Context, mck Mock) { + objectKey := client.ObjectKeyFromObject(&obj) + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) + Expect(*obj.Status.LastKeyGeneration).To(Equal(1)) + + Expect(mck.Events()).To(ContainSubstring("Object storage keys assigned")) + + logOutput := mck.Logs() + Expect(logOutput).To(ContainSubstring("Reconciling apply")) + Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) + }), + ), + Path(Once("secret is deleted", func(ctx context.Context, _ Mock) { + var secret corev1.Secret + secretKey := client.ObjectKey{Namespace: "default", Name: *obj.Status.KeySecretName} + Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) + Expect(k8sClient.Delete(ctx, &secret)).To(Succeed()) + })), + ), + OneOf( + Path( + Call("keys are not retrieved", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(nil, errors.New("get key error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("get key error")) + }), + ), + Path( + Call("keys are retrieved", func(ctx context.Context, mck Mock) { + for idx := range 2 { + mck.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), idx+2). 
+ Return(&linodego.ObjectStorageKey{ + ID: idx + 2, + AccessKey: fmt.Sprintf("access-key-%d", idx+2), + SecretKey: fmt.Sprintf("secret-key-%d", idx+2), + }, nil) + } + }), + Result("secret is restored", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err).NotTo(HaveOccurred()) + + var secret corev1.Secret + secretKey := client.ObjectKey{Namespace: "default", Name: *obj.Status.KeySecretName} + Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) + Expect(secret.Data).To(HaveLen(1)) + + var key accessKeySecret + Expect(yaml.Unmarshal(secret.Data["bucket-details-secret.yaml"], &key)).NotTo(HaveOccurred()) + Expect(key.StringData.BucketName).To(Equal("lifecycle")) + Expect(key.StringData.BucketRegion).To(Equal("cluster")) + Expect(key.StringData.BucketEndpoint).To(Equal("hostname")) + Expect(key.StringData.AccessKeyRW).To(Equal("access-key-2")) + Expect(key.StringData.SecretKeyRW).To(Equal("secret-key-2")) + Expect(key.StringData.AccessKeyRO).To(Equal("access-key-3")) + Expect(key.StringData.SecretKeyRO).To(Equal("secret-key-3")) + + events := mck.Events() + Expect(events).To(ContainSubstring("Object storage keys retrieved")) + Expect(events).To(ContainSubstring("Object storage keys stored in secret")) + Expect(events).To(ContainSubstring("Object storage bucket synced")) + + logOutput := mck.Logs() + Expect(logOutput).To(ContainSubstring("Reconciling apply")) + Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) + }), + ), + ), + Once("resource is deleted", func(ctx context.Context, _ Mock) { + // nb: client.Delete does not set DeletionTimestamp on the object, so re-fetch from the apiserver. 
+ objectKey := client.ObjectKeyFromObject(&obj) + Expect(k8sClient.Delete(ctx, &obj)).To(Succeed()) + Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) + }), + OneOf( + Path( + Call("keys are not revoked", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(errors.New("revoke error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("revoke error")) + }), + ), + Path( + Call("keys are revoked", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 2).Return(nil) + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 3).Return(nil) + }), + Result("finalizer is removed", func(ctx context.Context, mck Mock) { + objectKey := client.ObjectKeyFromObject(&obj) + k8sClient.Get(ctx, objectKey, &obj) + bScope.LinodeClient = mck.ObjectStorageClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err).NotTo(HaveOccurred()) + Expect(apierrors.IsNotFound(k8sClient.Get(ctx, objectKey, &obj))).To(BeTrue()) + + Expect(mck.Events()).To(ContainSubstring("Object storage keys revoked")) + Expect(mck.Logs()).To(ContainSubstring("Reconciling delete")) + }), + ), + ), + ) +}) - It("fails when the resource cannot be fetched", func(ctx SpecContext) { - mockK8sClient := mock.NewMockk8sClient(mockCtrl) - mockK8sClient.EXPECT(). - Get(gomock.Any(), gomock.Any(), gomock.Any()). - Return(errors.New("non-404 error")). 
- Times(1) - - reconciler.Client = mockK8sClient - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(&obj), - }) - Expect(err.Error()).To(ContainSubstring("non-404 error")) - Expect(testLogs.String()).To(ContainSubstring("Failed to fetch LinodeObjectStorageBucket")) - }) +var _ = Describe("errors", Label("bucket", "errors"), func() { + suite := NewControllerSuite( + GinkgoT(), + mock.MockLinodeObjectStorageClient{}, + mock.MockK8sClient{}, + ) - It("fails when a scope cannot be created due to missing arguments", func(ctx SpecContext) { - Expect(k8sClient.Create(ctx, &obj)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(&obj), - }) - Expect(err.Error()).To(ContainSubstring("failed to create object storage bucket scope")) - Expect(testLogs.String()).To(ContainSubstring("Failed to create object storage bucket scope")) - }) -}) + reconciler := LinodeObjectStorageBucketReconciler{} + bScope := scope.ObjectStorageBucketScope{} -var _ = Describe("apply", Label("bucket", "apply"), func() { - var obj infrav1.LinodeObjectStorageBucket - var mockCtrl *gomock.Controller - var testLogs *bytes.Buffer - - recorder := record.NewFakeRecorder(10) - reconciler := &LinodeObjectStorageBucketReconciler{ - Logger: zap.New( - zap.WriteTo(GinkgoWriter), - zap.WriteTo(testLogs), - zap.UseDevMode(true), - ), - Recorder: recorder, - } + suite.BeforeEach(func(_ context.Context, mck Mock) { + reconciler.Recorder = mck.Recorder() + bScope.Logger = mck.Logger() - BeforeEach(func() { + // Reset obj to base state to be modified in each test path. // We can use a consistent name since these tests are stateless. 
- obj = infrav1.LinodeObjectStorageBucket{ + bScope.Bucket = &infrav1.LinodeObjectStorageBucket{ ObjectMeta: metav1.ObjectMeta{ Name: "mock", Namespace: "default", @@ -440,240 +384,148 @@ var _ = Describe("apply", Label("bucket", "apply"), func() { Cluster: "cluster", }, } - mockCtrl = gomock.NewController(GinkgoT()) - testLogs = &bytes.Buffer{} - reconciler.Logger = zap.New( - zap.WriteTo(GinkgoWriter), - zap.WriteTo(testLogs), - zap.UseDevMode(true), - ) - }) - - AfterEach(func() { - mockCtrl.Finish() - for len(recorder.Events) > 0 { - <-recorder.Events - } - }) - - It("fails when a finalizer cannot be added", func(ctx SpecContext) { - mockK8sClient := mock.NewMockk8sClient(mockCtrl) - prev := mockK8sClient.EXPECT(). - Scheme(). - Return(scheme.Scheme). - Times(1) - mockK8sClient.EXPECT(). - Scheme(). - After(prev). - Return(runtime.NewScheme()). - Times(2) - - patchHelper, err := patch.NewHelper(&obj, mockK8sClient) - Expect(err).NotTo(HaveOccurred()) - - // Create a scope directly since only a subset of fields are needed. - bScope := scope.ObjectStorageBucketScope{ - Client: mockK8sClient, - Bucket: &obj, - PatchHelper: patchHelper, - } - - _, err = reconciler.reconcile(ctx, &bScope) - Expect(err.Error()).To(ContainSubstring("no kind is registered")) - }) - - It("fails when it can't ensure a bucket exists", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()). - Return(nil, errors.New("non-404 error")). 
- Times(1) - - bScope := scope.ObjectStorageBucketScope{ - LinodeClient: mockLinodeClient, - Bucket: &obj, - Logger: reconciler.Logger, - } - - err := reconciler.reconcileApply(ctx, &bScope) - Expect(err.Error()).To(ContainSubstring("non-404 error")) - Expect(<-recorder.Events).To(ContainSubstring("non-404 error")) - Expect(testLogs.String()).To(ContainSubstring("Failed to ensure bucket exists")) - }) - - It("fails when it can't provision new access keys", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()). - Return(&linodego.ObjectStorageBucket{Created: ptr.To(time.Now())}, nil). - Times(1) - mockLinodeClient.EXPECT(). - CreateObjectStorageKey(gomock.Any(), gomock.Any()). - Return(nil, errors.New("api error")). - Times(1) - - bScope := scope.ObjectStorageBucketScope{ - LinodeClient: mockLinodeClient, - Bucket: &obj, - Logger: reconciler.Logger, - } - - err := reconciler.reconcileApply(ctx, &bScope) - Expect(err.Error()).To(ContainSubstring("api error")) - Expect(<-recorder.Events).To(ContainSubstring("api error")) - Expect(testLogs.String()).To(ContainSubstring("Failed to provision new access keys")) - }) - - It("fails when it can't evaluate whether to restore a key secret", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()). - Return(&linodego.ObjectStorageBucket{Created: ptr.To(time.Now())}, nil). - Times(1) - - mockK8sClient := mock.NewMockk8sClient(mockCtrl) - mockK8sClient.EXPECT(). - Get(gomock.Any(), gomock.Any(), gomock.Any()). - Return(errors.New("api error")). 
- Times(1) - - obj.Spec.KeyGeneration = ptr.To(1) - obj.Status.LastKeyGeneration = obj.Spec.KeyGeneration - obj.Status.KeySecretName = ptr.To("mock-bucket-details") - obj.Status.AccessKeyRefs = []int{0, 1} - - bScope := scope.ObjectStorageBucketScope{ - Client: mockK8sClient, - LinodeClient: mockLinodeClient, - Bucket: &obj, - Logger: reconciler.Logger, - } - - err := reconciler.reconcileApply(ctx, &bScope) - Expect(err.Error()).To(ContainSubstring("api error")) - Expect(<-recorder.Events).To(ContainSubstring("api error")) - Expect(testLogs.String()).To(ContainSubstring("Failed to ensure access key secret exists")) - }) - - It("fails when it can't retrieve access keys for a deleted secret", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()). - Return(&linodego.ObjectStorageBucket{Created: ptr.To(time.Now())}, nil). - Times(1) - mockLinodeClient.EXPECT(). - GetObjectStorageKey(gomock.Any(), gomock.Any()). - Return(nil, errors.New("key creation error")). - Times(2) - - mockK8sClient := mock.NewMockk8sClient(mockCtrl) - mockK8sClient.EXPECT(). - Get(gomock.Any(), gomock.Any(), gomock.Any()). - Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "mock-bucket-details")). 
- Times(1) - - obj.Spec.KeyGeneration = ptr.To(1) - obj.Status.LastKeyGeneration = obj.Spec.KeyGeneration - obj.Status.KeySecretName = ptr.To("mock-bucket-details") - obj.Status.AccessKeyRefs = []int{0, 1} - - bScope := scope.ObjectStorageBucketScope{ - Client: mockK8sClient, - LinodeClient: mockLinodeClient, - Bucket: &obj, - Logger: reconciler.Logger, - } - - err := reconciler.reconcileApply(ctx, &bScope) - Expect(err.Error()).To(ContainSubstring("key creation error")) - Expect(<-recorder.Events).To(ContainSubstring("key creation error")) - Expect(testLogs.String()).To(ContainSubstring("Failed to restore access keys for deleted secret")) - }) - - It("fails when it can't generate a secret", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()). - Return(&linodego.ObjectStorageBucket{Created: ptr.To(time.Now())}, nil). - Times(1) - for idx := range 2 { - mockLinodeClient.EXPECT(). - GetObjectStorageKey(gomock.Any(), idx). - Return(&linodego.ObjectStorageKey{ID: idx}, nil). - Times(1) - } - - mockK8sClient := mock.NewMockk8sClient(mockCtrl) - mockK8sClient.EXPECT(). - Get(gomock.Any(), gomock.Any(), gomock.Any()). - Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "mock-bucket-details")). - Times(1) - mockK8sClient.EXPECT(). - Scheme(). - Return(runtime.NewScheme()). 
- Times(1) - - obj.Spec.KeyGeneration = ptr.To(1) - obj.Status.LastKeyGeneration = obj.Spec.KeyGeneration - obj.Status.KeySecretName = ptr.To("mock-bucket-details") - obj.Status.AccessKeyRefs = []int{0, 1} - - bScope := scope.ObjectStorageBucketScope{ - Client: mockK8sClient, - LinodeClient: mockLinodeClient, - Bucket: &obj, - Logger: reconciler.Logger, - } - - err := reconciler.reconcileApply(ctx, &bScope) - Expect(err.Error()).To(ContainSubstring("no kind is registered")) - Expect(<-recorder.Events).To(ContainSubstring("keys retrieved")) - Expect(<-recorder.Events).To(ContainSubstring("no kind is registered")) - Expect(testLogs.String()).To(ContainSubstring("Failed to generate key secret")) }) - It("fails when it can't restore a deleted secret", func(ctx SpecContext) { - mockLinodeClient := mock.NewMockLinodeObjectStorageClient(mockCtrl) - mockLinodeClient.EXPECT(). - GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()). - Return(&linodego.ObjectStorageBucket{Created: ptr.To(time.Now())}, nil). - Times(1) - for idx := range 2 { - mockLinodeClient.EXPECT(). - GetObjectStorageKey(gomock.Any(), idx). - Return(&linodego.ObjectStorageKey{ID: idx}, nil). - Times(1) - } - - mockK8sClient := mock.NewMockk8sClient(mockCtrl) - mockK8sClient.EXPECT(). - Scheme(). - Return(scheme.Scheme). - Times(1) - mockK8sClient.EXPECT(). - Get(gomock.Any(), gomock.Any(), gomock.Any()). - Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "mock-bucket-details")). - Times(1) - mockK8sClient.EXPECT(). - Create(gomock.Any(), gomock.Any(), gomock.Any()). 
- Return(errors.New("secret creation error")) - - obj.Spec.KeyGeneration = ptr.To(1) - obj.Status.LastKeyGeneration = obj.Spec.KeyGeneration - obj.Status.KeySecretName = ptr.To("mock-bucket-details") - obj.Status.AccessKeyRefs = []int{0, 1} - - bScope := scope.ObjectStorageBucketScope{ - Client: mockK8sClient, - LinodeClient: mockLinodeClient, - Bucket: &obj, - Logger: reconciler.Logger, - } - - err := reconciler.reconcileApply(ctx, &bScope) - Expect(err.Error()).To(ContainSubstring("secret creation error")) - Expect(<-recorder.Events).To(ContainSubstring("keys retrieved")) - Expect(<-recorder.Events).To(ContainSubstring("secret creation error")) - Expect(testLogs.String()).To(ContainSubstring("Failed to apply key secret")) - }) + suite.Run( + OneOf( + Path(Call("resource can be fetched", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + })), + Path( + Call("resource is not found", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "mock")) + }), + Result("no error", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(bScope.Bucket), + }) + Expect(err).NotTo(HaveOccurred()) + }), + ), + Path( + Call("resource can't be fetched", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("non-404 error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient + reconciler.Logger = bScope.Logger + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(bScope.Bucket), + }) + Expect(err.Error()).To(ContainSubstring("non-404 error")) + Expect(mck.Logs()).To(ContainSubstring("Failed to fetch 
LinodeObjectStorageBucket")) + }), + ), + ), + Result("scope params is missing args", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient + reconciler.Logger = bScope.Logger + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(bScope.Bucket), + }) + Expect(err.Error()).To(ContainSubstring("failed to create object storage bucket scope")) + Expect(mck.Logs()).To(ContainSubstring("Failed to create object storage bucket scope")) + }), + Call("scheme with no infrav1alpha1", func(ctx context.Context, mck Mock) { + prev := mck.K8sClient.EXPECT().Scheme().Return(scheme.Scheme) + mck.K8sClient.EXPECT().Scheme().After(prev).Return(runtime.NewScheme()).Times(2) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.Client = mck.K8sClient + + patchHelper, err := patch.NewHelper(bScope.Bucket, mck.K8sClient) + Expect(err).NotTo(HaveOccurred()) + bScope.PatchHelper = patchHelper + + _, err = reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("no kind is registered")) + }), + Call("get bucket", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucket{Created: ptr.To(time.Now())}, nil) + }), + OneOf( + Path( + Call("failed check for deleted secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("api error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.Bucket.Spec.KeyGeneration = ptr.To(1) + bScope.Bucket.Status.LastKeyGeneration = bScope.Bucket.Spec.KeyGeneration + bScope.Bucket.Status.KeySecretName = ptr.To("mock-bucket-details") + bScope.Bucket.Status.AccessKeyRefs = []int{0, 1} + + bScope.LinodeClient = mck.ObjectStorageClient + bScope.Client = mck.K8sClient + err := reconciler.reconcileApply(ctx, &bScope) + 
Expect(err.Error()).To(ContainSubstring("api error")) + Expect(mck.Events()).To(ContainSubstring("api error")) + Expect(mck.Logs()).To(ContainSubstring("Failed to ensure access key secret exists")) + }), + ), + Path(Call("secret deleted", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "mock-bucket-details")) + })), + ), + Call("get keys", func(ctx context.Context, mck Mock) { + for idx := range 2 { + mck.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), idx).Return(&linodego.ObjectStorageKey{ID: idx}, nil) + } + }), + OneOf( + Path( + Call("secret resource creation fails", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().Return(scheme.Scheme).AnyTimes() + mck.K8sClient.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("secret creation error")) + }), + Result("creation error", func(ctx context.Context, mck Mock) { + bScope.Bucket.Spec.KeyGeneration = ptr.To(1) + bScope.Bucket.Status.LastKeyGeneration = bScope.Bucket.Spec.KeyGeneration + bScope.Bucket.Status.KeySecretName = ptr.To("mock-bucket-details") + bScope.Bucket.Status.AccessKeyRefs = []int{0, 1} + + bScope.LinodeClient = mck.ObjectStorageClient + bScope.Client = mck.K8sClient + err := reconciler.reconcileApply(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("secret creation error")) + Expect(mck.Events()).To(ContainSubstring("keys retrieved")) + Expect(mck.Events()).To(ContainSubstring("secret creation error")) + Expect(mck.Logs()).To(ContainSubstring("Failed to apply key secret")) + }), + ), + Path( + Call("secret generation fails", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme()) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.Bucket.Spec.KeyGeneration = ptr.To(1) + bScope.Bucket.Status.LastKeyGeneration = 
bScope.Bucket.Spec.KeyGeneration + bScope.Bucket.Status.KeySecretName = ptr.To("mock-bucket-details") + bScope.Bucket.Status.AccessKeyRefs = []int{0, 1} + + bScope.LinodeClient = mck.ObjectStorageClient + bScope.Client = mck.K8sClient + err := reconciler.reconcileApply(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("no kind is registered")) + Expect(mck.Events()).To(ContainSubstring("keys retrieved")) + Expect(mck.Events()).To(ContainSubstring("no kind is registered")) + Expect(mck.Logs()).To(ContainSubstring("Failed to generate key secret")) + }), + ), + ), + Once("finalizer is missing", func(ctx context.Context, _ Mock) { + bScope.Bucket.Status.AccessKeyRefs = []int{0, 1} + bScope.Bucket.ObjectMeta.Finalizers = []string{} + }), + Call("revoke keys", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(nil) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient + bScope.Client = mck.K8sClient + err := reconciler.reconcileDelete(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("failed to remove finalizer from bucket")) + Expect(mck.Events()).To(ContainSubstring("failed to remove finalizer from bucket")) + }), + ) }) diff --git a/docs/src/developers/testing.md b/docs/src/developers/testing.md index 61d9badc9..9835e20c7 100644 --- a/docs/src/developers/testing.md +++ b/docs/src/developers/testing.md @@ -6,6 +6,172 @@ In order to run the unit tests run the following command ```bash make test ``` +### Adding tests +General unit tests of functions follow the same conventions for testing using Go's `testing` standard library, along with the [testify](https://github.com/stretchr/testify) toolkit for making assertions. + +Unit tests that require API clients use mock clients generated using [gomock](https://github.com/uber-go/mock). 
To simplify the usage of mock clients, this repo also uses an internal library defined in `mock/mocktest`.
+
+`mocktest` is usually imported as a dot import along with the `mock` package:
+
+```go
+import (
+	"github.com/linode/cluster-api-provider-linode/mock"
+
+	. "github.com/linode/cluster-api-provider-linode/mock/mocktest"
+)
+```
+
+Using `mocktest` involves creating a test suite that specifies the mock clients to be used within each test scope and running the test suite using a DSL for defining test nodes belonging to one or more test paths.
+
+#### Example
+The following is a contrived example using the mock Linode machine client.
+
+Let's say we've written an idempotent function `EnsureInstanceRuns` that 1) gets an instance or creates it if it doesn't exist, 2) boots the instance if it's offline. Testing this function would mean we'd need to write test cases for all permutations, i.e.
+* instance exists and is not offline
+* instance exists but is offline, and is able to boot
+* instance exists but is offline, and is not able to boot
+* instance does not exist, and is not able to be created
+* instance does not exist, and is able to be created, and is able to boot
+* instance does not exist, and is able to be created, and is not able to boot
+
+While writing test cases for each scenario, we'd likely find a lot of overlap between each. `mocktest` provides a DSL for defining each unique test case without needing to spell out all required mock client calls for each case. Here's how we could test `EnsureInstanceRuns` using `mocktest`:
+
+```go
+func TestEnsureInstanceNotOffline(t *testing.T) {
+	suite := NewSuite(t, mock.MockLinodeMachineClient{})
+
+	suite.Run(
+		OneOf(
+			Path(
+				Call("instance exists and is not offline", func(ctx context.Context, mck Mock) {
+					mck.MachineClient.EXPECT().GetInstance(ctx, /* ... 
*/).Return(&linodego.Instance{Status: linodego.InstanceRunning}, nil) + }), + Result("success", func(ctx context.Context, mck Mock) { + inst, err := EnsureInstanceNotOffline(ctx, /* ... */) + require.NoError(t, err) + assert.Equal(t, inst.Status, linodego.InstanceRunning) + }) + ), + Path( + Call("instance does not exist", func(ctx context.Context, mck Mock) { + mck.MachineClient.EXPECT().GetInstance(ctx, /* ... */).Return(nil, linodego.Error{Code: 404}) + }), + OneOf( + Path(Call("able to be created", func(ctx context.Context, mck Mock) { + mck.MachineClient.EXPECT().CreateInstance(ctx, /* ... */).Return(&linodego.Instance{Status: linodego.InstanceOffline}, nil) + })), + Path( + Call("not able to be created", func(ctx context.Context, mck Mock) {/* ... */}) + Result("error", func(ctx context.Context, mck Mock) { + inst, err := EnsureInstanceNotOffline(ctx, /* ... */) + require.ErrorContains(t, err, "instance was not booted: failed to create instance: reasons...") + assert.Empty(inst) + }), + ) + ), + ), + Path(Call("instance exists but is offline", func(ctx context.Context, mck Mock) { + mck.MachineClient.EXPECT().GetInstance(ctx, /* ... */).Return(&linodego.Instance{Status: linodego.InstanceOffline}, nil) + })), + ), + OneOf( + Path( + Call("able to boot", func(ctx context.Context, mck Mock) {/* */}) + Result("success", func(ctx context.Context, mck Mock) { + inst, err := EnsureInstanceNotOffline(ctx, /* ... 
*/) + require.NoError(t, err) + assert.Equal(t, inst.Status, linodego.InstanceBooting) + }) + ), + Path( + Call("not able to boot", func(ctx context.Context, mck Mock) {/* returns API error */}) + Result("error", func(ctx context.Context, mck Mock) { + inst, err := EnsureInstanceNotOffline(/* options */) + require.ErrorContains(t, err, "instance was not booted: boot failed: reasons...") + assert.Empty(inst) + }) + ) + ), + ) +} +``` +In this example, the nodes passed into `Run` are used to describe each permutation of the function being called with different results from the mock Linode machine client. + +#### Nodes +* `Call` describes the behavior of method calls by mock clients. A `Call` node can belong to one or more paths. +* `Result` invokes the function with mock clients and tests the output. A `Result` node terminates each path it belongs to. +* `OneOf` is a collection of diverging paths that will be evaluated in separate test cases. +* `Path` is a collection of nodes that all belong to the same test path. Each child node of a `Path` is evaluated in order. Note that `Path` is only needed for logically grouping and isolating nodes within different test cases in a `OneOf` node. + +#### Setup, tear down, and event triggers +Setup and tear down nodes can be scheduled before and after each run. `suite.BeforeEach` receives a `func(context.Context, Mock)` function that will run before each path is evaluated. Likewise, `suite.AfterEach` will run after each path is evaluated. + +In addition to the path nodes listed in the section above, a special node type `Once` may be specified to inject a function that will only be evaluated one time across all paths. It can be used to trigger side effects outside of mock client behavior that can impact the output of the function being tested. + +#### Control flow +When `Run` is called on a test suite, paths are evaluated in parallel using `t.Parallel()`. 
Each path will be run with a separate `t.Run` call, and each test run will be named according to the descriptions specified in each node. + +To help with visualizing the paths that will be rendered from nodes, a `DescribePaths` helper function can be called which returns a slice of strings describing each path. For instance, the following shows the output of `DescribePaths` on the paths described in the example above: + +```go +DescribePaths(/* nodes... */) /* [ + "instance exists and is not offline > success", + "instance does not exist > not able to be created > error", + "instance does not exist > able to be created > able to boot > success", + "instance does not exist > able to be created > not able to boot > error", + "instance exists but is offline > able to boot > success", + "instance exists but is offline > not able to boot > error" +] */ +``` + +#### Testing controllers +CAPL uses controller-runtime's [envtest](https://book.kubebuilder.io/reference/envtest) package which runs an instance of etcd and the Kubernetes API server for testing controllers. The test setup uses [ginkgo](https://onsi.github.io/ginkgo/) as its test runner as well as [gomega](https://onsi.github.io/gomega/) for assertions. + +`mocktest` is also recommended when writing tests for controllers. The following is another contrived example of how to use its controller suite: + +```go +var _ = Describe("linode creation", func() { + // Create a mocktest controller suite. + suite := NewControllerSuite(GinkgoT(), mock.MockLinodeMachineClient{}) + + obj := infrav1alpha1.LinodeMachine{ + ObjectMeta: metav1.ObjectMeta{/* ... */} + Spec: infrav1alpha1.LinodeMachineSpec{/* ... 
*/},
+	}
+
+	suite.Run(
+		Once("create resource", func(ctx context.Context, _ Mock) {
+			// Use the EnvTest k8sClient to create the resource in the test server
+			Expect(k8sClient.Create(ctx, &obj)).To(Succeed())
+		}),
+		Call("create a linode", func(ctx context.Context, mck Mock) {
+			mck.MachineClient.EXPECT().CreateInstance(ctx, gomock.Any(), gomock.Any()).Return(&linodego.Instance{/* ... */}, nil)
+		}),
+		Result("update the resource status after linode creation", func(ctx context.Context, mck Mock) {
+			reconciler := LinodeMachineReconciler{
+				// Configure the reconciler to use the mock client for this test path
+				LinodeClient: mck.MachineClient,
+				// Use a managed recorder for capturing events published during this test
+				Recorder: mck.Recorder(),
+				// Use a managed logger for capturing logs written during the test
+				// Note: This isn't a real struct field in LinodeMachineReconciler. A logger is configured elsewhere.
+				Logger: mck.Logger(),
+			}
+
+			_, err := reconciler.Reconcile(ctx, reconcile.Request{/* ... 
*/}) + Expect(err).NotTo(HaveOccurred()) + + // Fetch the updated object in the test server and confirm it was updated + Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(obj))).To(Succeed()) + Expect(obj.Status.Ready).To(BeTrue()) + + // Check for expected events and logs + Expect(mck.Events()).To(ContainSubstring("Linode created!")) + Expect(mck.Logs()).To(ContainSubstring("Linode created!")) + }), + ) +}) +``` ## E2E Tests For e2e tests CAPL uses the [Chainsaw project](https://kyverno.github.io/chainsaw) which leverages `kind` and `tilt` to diff --git a/mock/client.go b/mock/client.go index fb5748fe2..f2bdd9215 100644 --- a/mock/client.go +++ b/mock/client.go @@ -860,31 +860,31 @@ func (mr *MockLinodeObjectStorageClientMockRecorder) GetObjectStorageKey(ctx, ke return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorageKey", reflect.TypeOf((*MockLinodeObjectStorageClient)(nil).GetObjectStorageKey), ctx, keyID) } -// Mockk8sClient is a mock of k8sClient interface. -type Mockk8sClient struct { +// MockK8sClient is a mock of K8sClient interface. +type MockK8sClient struct { ctrl *gomock.Controller - recorder *Mockk8sClientMockRecorder + recorder *MockK8sClientMockRecorder } -// Mockk8sClientMockRecorder is the mock recorder for Mockk8sClient. -type Mockk8sClientMockRecorder struct { - mock *Mockk8sClient +// MockK8sClientMockRecorder is the mock recorder for MockK8sClient. +type MockK8sClientMockRecorder struct { + mock *MockK8sClient } -// NewMockk8sClient creates a new mock instance. -func NewMockk8sClient(ctrl *gomock.Controller) *Mockk8sClient { - mock := &Mockk8sClient{ctrl: ctrl} - mock.recorder = &Mockk8sClientMockRecorder{mock} +// NewMockK8sClient creates a new mock instance. +func NewMockK8sClient(ctrl *gomock.Controller) *MockK8sClient { + mock := &MockK8sClient{ctrl: ctrl} + mock.recorder = &MockK8sClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *Mockk8sClient) EXPECT() *Mockk8sClientMockRecorder { +func (m *MockK8sClient) EXPECT() *MockK8sClientMockRecorder { return m.recorder } // Create mocks base method. -func (m *Mockk8sClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { +func (m *MockK8sClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { m.ctrl.T.Helper() varargs := []any{ctx, obj} for _, a := range opts { @@ -896,14 +896,14 @@ func (m *Mockk8sClient) Create(ctx context.Context, obj client.Object, opts ...c } // Create indicates an expected call of Create. -func (mr *Mockk8sClientMockRecorder) Create(ctx, obj any, opts ...any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) Create(ctx, obj any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]any{ctx, obj}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*Mockk8sClient)(nil).Create), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockK8sClient)(nil).Create), varargs...) } // Delete mocks base method. -func (m *Mockk8sClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { +func (m *MockK8sClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { m.ctrl.T.Helper() varargs := []any{ctx, obj} for _, a := range opts { @@ -915,14 +915,14 @@ func (m *Mockk8sClient) Delete(ctx context.Context, obj client.Object, opts ...c } // Delete indicates an expected call of Delete. -func (mr *Mockk8sClientMockRecorder) Delete(ctx, obj any, opts ...any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) Delete(ctx, obj any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]any{ctx, obj}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*Mockk8sClient)(nil).Delete), varargs...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockK8sClient)(nil).Delete), varargs...) } // DeleteAllOf mocks base method. -func (m *Mockk8sClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { +func (m *MockK8sClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { m.ctrl.T.Helper() varargs := []any{ctx, obj} for _, a := range opts { @@ -934,14 +934,14 @@ func (m *Mockk8sClient) DeleteAllOf(ctx context.Context, obj client.Object, opts } // DeleteAllOf indicates an expected call of DeleteAllOf. -func (mr *Mockk8sClientMockRecorder) DeleteAllOf(ctx, obj any, opts ...any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) DeleteAllOf(ctx, obj any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]any{ctx, obj}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*Mockk8sClient)(nil).DeleteAllOf), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*MockK8sClient)(nil).DeleteAllOf), varargs...) } // Get mocks base method. -func (m *Mockk8sClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { +func (m *MockK8sClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { m.ctrl.T.Helper() varargs := []any{ctx, key, obj} for _, a := range opts { @@ -953,14 +953,14 @@ func (m *Mockk8sClient) Get(ctx context.Context, key client.ObjectKey, obj clien } // Get indicates an expected call of Get. -func (mr *Mockk8sClientMockRecorder) Get(ctx, key, obj any, opts ...any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) Get(ctx, key, obj any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]any{ctx, key, obj}, opts...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*Mockk8sClient)(nil).Get), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockK8sClient)(nil).Get), varargs...) } // GroupVersionKindFor mocks base method. -func (m *Mockk8sClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { +func (m *MockK8sClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GroupVersionKindFor", obj) ret0, _ := ret[0].(schema.GroupVersionKind) @@ -969,13 +969,13 @@ func (m *Mockk8sClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVer } // GroupVersionKindFor indicates an expected call of GroupVersionKindFor. -func (mr *Mockk8sClientMockRecorder) GroupVersionKindFor(obj any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) GroupVersionKindFor(obj any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupVersionKindFor", reflect.TypeOf((*Mockk8sClient)(nil).GroupVersionKindFor), obj) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupVersionKindFor", reflect.TypeOf((*MockK8sClient)(nil).GroupVersionKindFor), obj) } // IsObjectNamespaced mocks base method. -func (m *Mockk8sClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { +func (m *MockK8sClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IsObjectNamespaced", obj) ret0, _ := ret[0].(bool) @@ -984,13 +984,13 @@ func (m *Mockk8sClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { } // IsObjectNamespaced indicates an expected call of IsObjectNamespaced. 
-func (mr *Mockk8sClientMockRecorder) IsObjectNamespaced(obj any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) IsObjectNamespaced(obj any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsObjectNamespaced", reflect.TypeOf((*Mockk8sClient)(nil).IsObjectNamespaced), obj) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsObjectNamespaced", reflect.TypeOf((*MockK8sClient)(nil).IsObjectNamespaced), obj) } // List mocks base method. -func (m *Mockk8sClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { +func (m *MockK8sClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { m.ctrl.T.Helper() varargs := []any{ctx, list} for _, a := range opts { @@ -1002,14 +1002,14 @@ func (m *Mockk8sClient) List(ctx context.Context, list client.ObjectList, opts . } // List indicates an expected call of List. -func (mr *Mockk8sClientMockRecorder) List(ctx, list any, opts ...any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) List(ctx, list any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]any{ctx, list}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*Mockk8sClient)(nil).List), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockK8sClient)(nil).List), varargs...) } // Patch mocks base method. -func (m *Mockk8sClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { +func (m *MockK8sClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { m.ctrl.T.Helper() varargs := []any{ctx, obj, patch} for _, a := range opts { @@ -1021,14 +1021,14 @@ func (m *Mockk8sClient) Patch(ctx context.Context, obj client.Object, patch clie } // Patch indicates an expected call of Patch. 
-func (mr *Mockk8sClientMockRecorder) Patch(ctx, obj, patch any, opts ...any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) Patch(ctx, obj, patch any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]any{ctx, obj, patch}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*Mockk8sClient)(nil).Patch), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockK8sClient)(nil).Patch), varargs...) } // RESTMapper mocks base method. -func (m *Mockk8sClient) RESTMapper() meta.RESTMapper { +func (m *MockK8sClient) RESTMapper() meta.RESTMapper { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RESTMapper") ret0, _ := ret[0].(meta.RESTMapper) @@ -1036,13 +1036,13 @@ func (m *Mockk8sClient) RESTMapper() meta.RESTMapper { } // RESTMapper indicates an expected call of RESTMapper. -func (mr *Mockk8sClientMockRecorder) RESTMapper() *gomock.Call { +func (mr *MockK8sClientMockRecorder) RESTMapper() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RESTMapper", reflect.TypeOf((*Mockk8sClient)(nil).RESTMapper)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RESTMapper", reflect.TypeOf((*MockK8sClient)(nil).RESTMapper)) } // Scheme mocks base method. -func (m *Mockk8sClient) Scheme() *runtime.Scheme { +func (m *MockK8sClient) Scheme() *runtime.Scheme { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Scheme") ret0, _ := ret[0].(*runtime.Scheme) @@ -1050,13 +1050,13 @@ func (m *Mockk8sClient) Scheme() *runtime.Scheme { } // Scheme indicates an expected call of Scheme. 
-func (mr *Mockk8sClientMockRecorder) Scheme() *gomock.Call { +func (mr *MockK8sClientMockRecorder) Scheme() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scheme", reflect.TypeOf((*Mockk8sClient)(nil).Scheme)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scheme", reflect.TypeOf((*MockK8sClient)(nil).Scheme)) } // Status mocks base method. -func (m *Mockk8sClient) Status() client.SubResourceWriter { +func (m *MockK8sClient) Status() client.SubResourceWriter { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Status") ret0, _ := ret[0].(client.SubResourceWriter) @@ -1064,13 +1064,13 @@ func (m *Mockk8sClient) Status() client.SubResourceWriter { } // Status indicates an expected call of Status. -func (mr *Mockk8sClientMockRecorder) Status() *gomock.Call { +func (mr *MockK8sClientMockRecorder) Status() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*Mockk8sClient)(nil).Status)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockK8sClient)(nil).Status)) } // SubResource mocks base method. -func (m *Mockk8sClient) SubResource(subResource string) client.SubResourceClient { +func (m *MockK8sClient) SubResource(subResource string) client.SubResourceClient { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SubResource", subResource) ret0, _ := ret[0].(client.SubResourceClient) @@ -1078,13 +1078,13 @@ func (m *Mockk8sClient) SubResource(subResource string) client.SubResourceClient } // SubResource indicates an expected call of SubResource. 
-func (mr *Mockk8sClientMockRecorder) SubResource(subResource any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) SubResource(subResource any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*Mockk8sClient)(nil).SubResource), subResource) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockK8sClient)(nil).SubResource), subResource) } // Update mocks base method. -func (m *Mockk8sClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { +func (m *MockK8sClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { m.ctrl.T.Helper() varargs := []any{ctx, obj} for _, a := range opts { @@ -1096,8 +1096,8 @@ func (m *Mockk8sClient) Update(ctx context.Context, obj client.Object, opts ...c } // Update indicates an expected call of Update. -func (mr *Mockk8sClientMockRecorder) Update(ctx, obj any, opts ...any) *gomock.Call { +func (mr *MockK8sClientMockRecorder) Update(ctx, obj any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]any{ctx, obj}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*Mockk8sClient)(nil).Update), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockK8sClient)(nil).Update), varargs...) } diff --git a/mock/common.go b/mock/common.go new file mode 100644 index 000000000..20a035d4e --- /dev/null +++ b/mock/common.go @@ -0,0 +1,41 @@ +package mock + +import ( + gomock "go.uber.org/mock/gomock" +) + +// MockClient is a common interface for generated mock clients. +// Each implementation is not generated and must be updated manually. 
+type MockClient interface { + mock() +} + +func (MockLinodeMachineClient) mock() {} +func (MockLinodeVPCClient) mock() {} +func (MockLinodeNodeBalancerClient) mock() {} +func (MockLinodeObjectStorageClient) mock() {} +func (MockK8sClient) mock() {} + +// MockClients holds mock clients that may be instantiated. +type MockClients struct { + MachineClient *MockLinodeMachineClient + VPCClient *MockLinodeVPCClient + NodeBalancerClient *MockLinodeNodeBalancerClient + ObjectStorageClient *MockLinodeObjectStorageClient + K8sClient *MockK8sClient +} + +func (mc *MockClients) Build(client MockClient, ctrl *gomock.Controller) { + switch client.(type) { + case MockLinodeMachineClient, *MockLinodeMachineClient: + mc.MachineClient = NewMockLinodeMachineClient(ctrl) + case MockLinodeVPCClient, *MockLinodeVPCClient: + mc.VPCClient = NewMockLinodeVPCClient(ctrl) + case MockLinodeNodeBalancerClient, *MockLinodeNodeBalancerClient: + mc.NodeBalancerClient = NewMockLinodeNodeBalancerClient(ctrl) + case MockLinodeObjectStorageClient, *MockLinodeObjectStorageClient: + mc.ObjectStorageClient = NewMockLinodeObjectStorageClient(ctrl) + case MockK8sClient, *MockK8sClient: + mc.K8sClient = NewMockK8sClient(ctrl) + } +} diff --git a/mock/mocktest/mock.go b/mock/mocktest/mock.go new file mode 100644 index 000000000..fe426949e --- /dev/null +++ b/mock/mocktest/mock.go @@ -0,0 +1,60 @@ +package mocktest + +import ( + "bytes" + "strings" + + "github.com/go-logr/logr" + "go.uber.org/mock/gomock" + "k8s.io/client-go/tools/record" + + "github.com/linode/cluster-api-provider-linode/mock" +) + +// Mock holds configuration for a single test path. +type Mock struct { + gomock.TestReporter + mock.MockClients + + recorder *record.FakeRecorder + events string + logger logr.Logger + logs *bytes.Buffer +} + +// Recorder returns a *FakeRecorder for recording events published in a reconcile loop. +// Events can be consumed as a single string by calling Events(). 
+func (m *Mock) Recorder() *record.FakeRecorder { + return m.recorder +} + +// Logger returns a logr.Logger for capturing logs written during a reconcile loop. +// Log output can be read as a single string by calling Logs(). +func (m *Mock) Logger() logr.Logger { + return m.logger +} + +// Events returns a string of all events currently recorded during path evaluation. +func (m *Mock) Events() string { + if m.recorder == nil { + panic("no recorder configured on Mock") + } + + var strBuilder strings.Builder + for len(m.recorder.Events) > 0 { + strBuilder.WriteString(<-m.recorder.Events) + } + + m.events += strBuilder.String() + + return m.events +} + +// Logs returns a string of all log outputs currently written during path evaluation. +func (m *Mock) Logs() string { + if m.logs == nil { + panic("no logger configured on Mock") + } + + return m.logs.String() +} diff --git a/mock/mocktest/mock_test.go b/mock/mocktest/mock_test.go new file mode 100644 index 000000000..ac723c59b --- /dev/null +++ b/mock/mocktest/mock_test.go @@ -0,0 +1,25 @@ +package mocktest + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEventsWithoutRecorder(t *testing.T) { + t.Parallel() + + mck := Mock{} + assert.Panics(t, func() { + mck.Events() + }) +} + +func TestLogsWithoutLogger(t *testing.T) { + t.Parallel() + + mck := Mock{} + assert.Panics(t, func() { + mck.Logs() + }) +} diff --git a/mock/mocktest/node.go b/mock/mocktest/node.go new file mode 100644 index 000000000..eb9c352ad --- /dev/null +++ b/mock/mocktest/node.go @@ -0,0 +1,143 @@ +package mocktest + +import ( + "context" +) + +// DescribePaths computes all permutations for the given nodes +// and returns a slice of strings describing each permutation. +func DescribePaths(nodes ...node) []string { + pths := mkPaths(nodes...) + return pths.describe() +} + +// Common interface for defining permutations of test paths as a tree. 
+type node interface { + update(staged, committed paths) (st, com paths) +} + +// A container for describing and holding a function. +type fn struct { + text string + does func(context.Context, Mock) + ran bool +} + +// Call declares a function for mocking method calls on a single mock client. +func Call(text string, does func(context.Context, Mock)) call { + return call{ + text: text, + does: does, + } +} + +// Contains a function for mocking method calls on a single mock client. +type call fn + +// Adds the call to each staged path. +func (c call) update(staged, committed paths) (st, com paths) { + for idx, pth := range staged { + newCalls := make([]call, len(pth.calls), len(pth.calls)+1) + copy(newCalls, pth.calls) + staged[idx] = path{ + once: pth.once, + calls: append(newCalls, c), + } + } + + return staged, committed +} + +// Result terminates a test path with a function that tests the effects of mocked method calls. +func Result(text string, does func(context.Context, Mock)) result { + return result{ + text: text, + does: does, + } +} + +// Contains a function that tests the effects of mocked method calls. +type result fn + +// Commits each staged path with the result. +func (r result) update(staged, committed paths) (st, com paths) { + for idx := range staged { + staged[idx].result = r + } + + committed = append(committed, staged...) + staged = paths{} + + return staged, committed +} + +// Once declares a function that runs one time when executing all test paths. +// It is triggered at the beginning of the leftmost test path where it is inserted. +func Once(text string, does func(context.Context, Mock)) once { + return once{ + text: text, + does: does, + } +} + +// Contains a function that will only run once. +type once fn + +// Adds once to the first staged path. +// It will only be invoked once in the first path to be evaluated. 
+func (o once) update(staged, committed paths) (st, com paths) { + if len(staged) > 0 { + staged[0].once = append(staged[0].once, &o) + } + + return staged, committed +} + +// Path declares a sequence of nodes belonging to the same test path. +func Path(nodes ...node) allOf { + if len(nodes) == 0 { + panic("Path called with no nodes") + } + + return nodes +} + +// A container for defining nodes added to the same test path. +type allOf []node + +// Adds all nodes to each staged path, committing paths whenever a result is included. +func (a allOf) update(staged, committed paths) (st, com paths) { + for _, impl := range a { + staged, committed = impl.update(staged, committed) + } + + return staged, committed +} + +// OneOf declares multiple nodes that fork out into unique test paths. +func OneOf(nodes ...allOf) oneOf { + if len(nodes) == 0 { + panic("OneOf called with no nodes") + } + + return nodes +} + +// A container for defining nodes that fork out into unique test paths. +type oneOf []allOf + +// Generates new permutations of each staged path with each node. +// Each node should never occur on the same path. +func (o oneOf) update(staged, committed paths) (st, com paths) { + permutations := paths{} + + for _, pth := range staged { + for _, impl := range o { + var localPerms paths + localPerms, committed = impl.update(paths{pth}, committed) + permutations = append(permutations, localPerms...) 
+ } + } + + return permutations, committed +} diff --git a/mock/mocktest/node_test.go b/mock/mocktest/node_test.go new file mode 100644 index 000000000..04efc58bb --- /dev/null +++ b/mock/mocktest/node_test.go @@ -0,0 +1,23 @@ +package mocktest + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOneOfWithoutNodes(t *testing.T) { + t.Parallel() + + assert.Panics(t, func() { + OneOf() + }) +} + +func TestPathWithoutNodes(t *testing.T) { + t.Parallel() + + assert.Panics(t, func() { + Path() + }) +} diff --git a/mock/mocktest/path.go b/mock/mocktest/path.go new file mode 100644 index 000000000..f73a5e9b5 --- /dev/null +++ b/mock/mocktest/path.go @@ -0,0 +1,120 @@ +package mocktest + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/onsi/ginkgo/v2" +) + +// A container for mock calls and a function for asserting results. +type path struct { + // Store as pointers so each path can check if it was invoked + once []*once + calls []call + result +} + +// Generates a string of all nodes belonging to a test path. +func (p path) describe() string { + text := make([]string, 0, len(p.calls)+1) + for _, c := range p.calls { + text = append(text, c.text) + } + text = append(text, p.result.text) + return strings.Join(text, " > ") +} + +// Evaluates all declared mock client methods and assertions for the given test path. +func (p path) run(ctx context.Context, mck Mock) { + if mck.TestReporter == nil { + panic("Mock requires TestReporter, i.e. 
*testing.T, GinkgoT()") + } + + for _, o := range p.once { + evalOnce(ctx, mck, o) + } + for _, c := range p.calls { + evalFn(ctx, mck, fn(c)) + } + + evalFn(ctx, mck, fn(p.result)) +} + +func evalFn(ctx context.Context, mck Mock, fun fn) { + switch tt := mck.TestReporter.(type) { + case *testing.T: + tt.Log(fun.text) + case ginkgo.GinkgoTInterface: + ginkgo.By(fun.text) + } + + fun.does(ctx, mck) +} + +func evalOnce(ctx context.Context, mck Mock, fun *once) { + if fun.ran { + return + } + + evalFn(ctx, mck, fn(*fun)) + + fun.ran = true +} + +type paths []path + +func (ps paths) describe() []string { + texts := make([]string, 0, len(ps)) + described := make(map[*once]bool) + + for _, pth := range ps { + var text strings.Builder + for _, o := range pth.once { + if !described[o] { + text.WriteString(o.text + " > ") + described[o] = true + } + } + text.WriteString(pth.describe()) + texts = append(texts, text.String()) + } + + return texts +} + +// Declares one or more test paths with mock clients. +// It traverses each node and their children, returning a list of permutations of test paths. +func mkPaths(nodes ...node) paths { + if len(nodes) == 0 { + return paths{} + } + + staged, committed := rPaths(paths{}, paths{}, nodes) + if len(staged) > 0 { + panic(errors.New("unresolved path detected")) + } + + return committed +} + +func rPaths(staged, committed paths, each []node) (st, com paths) { + if len(each) == 0 { + return staged, committed + } + + // Get the current node to add to staged/committed. + head, tail := each[0], each[1:] + + // If there are no open paths, make a new path. + if len(staged) == 0 { + staged = append(staged, path{}) + } + + // Add to staged/committed. 
+ staged, committed = head.update(staged, committed) + + return rPaths(staged, committed, tail) +} diff --git a/mock/mocktest/path_test.go b/mock/mocktest/path_test.go new file mode 100644 index 000000000..8b70dfc07 --- /dev/null +++ b/mock/mocktest/path_test.go @@ -0,0 +1,588 @@ +package mocktest + +import ( + "context" + "errors" + "testing" + + "github.com/linode/linodego" + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" + "github.com/linode/cluster-api-provider-linode/mock" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestUsage(t *testing.T) { + t.Parallel() + + RegisterFailHandler(Fail) + RunSpecs(t, "Controller Suite") +} + +var _ = Describe("k8s client", Label("k8sclient"), func() { + var mockCtrl *gomock.Controller + + BeforeEach(func() { + mockCtrl = gomock.NewController(GinkgoT()) + }) + + AfterEach(func() { + mockCtrl.Finish() + }) + + for _, pth := range mkPaths( + Once("setup", func(_ context.Context, _ Mock) {}), + Call("fetch object", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }), + Result("no error", func(ctx context.Context, mck Mock) { + Expect(contrivedCalls(ctx, mck)).To(Succeed()) + }), + ) { + It(pth.describe(), func(ctx SpecContext) { + pth.run(ctx, Mock{ + TestReporter: GinkgoT(), + MockClients: mock.MockClients{ + K8sClient: mock.NewMockK8sClient(mockCtrl), + }, + }) + }) + } +}) + +var _ = Describe("multiple clients", Label("multiple"), func() { + var mockCtrl *gomock.Controller + + BeforeEach(func() { + mockCtrl = gomock.NewController(GinkgoT()) + }) + + AfterEach(func() { + mockCtrl.Finish() + }) + + for _, pth := range mkPaths( + Call("read object", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }), + OneOf( + 
Path( + Call("underlying exists", func(ctx context.Context, mck Mock) { + mck.MachineClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Return(&linodego.Instance{ID: 1}, nil) + }), + Result("no error", func(ctx context.Context, mck Mock) { + Expect(contrivedCalls(ctx, mck)).To(Succeed()) + }), + ), + Path( + Call("underlying does not exist", func(ctx context.Context, mck Mock) { + mck.MachineClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Return(nil, errors.New("404")) + }), + Result("error", func(ctx context.Context, mck Mock) { + Expect(contrivedCalls(ctx, mck)).NotTo(Succeed()) + }), + ), + ), + ) { + It(pth.describe(), func(ctx SpecContext) { + pth.run(ctx, Mock{ + TestReporter: GinkgoT(), + MockClients: mock.MockClients{ + MachineClient: mock.NewMockLinodeMachineClient(mockCtrl), + K8sClient: mock.NewMockK8sClient(mockCtrl), + }, + }) + }) + } +}) + +func contrivedCalls(ctx context.Context, mck Mock) error { + GinkgoHelper() + + err := mck.K8sClient.Get(ctx, client.ObjectKey{}, &infrav1alpha1.LinodeMachine{}) + if err != nil { + return err + } + + if mck.MachineClient != nil { + _, err = mck.MachineClient.CreateInstance(ctx, linodego.InstanceCreateOptions{}) + if err != nil { + return err + } + } + + return nil +} + +func TestPaths(t *testing.T) { + t.Parallel() + + for _, testCase := range []struct { + name string + input []node + output paths + describe []string + panic bool + }{ + { + name: "empty", + input: []node{}, + output: paths{}, + describe: []string{}, + }, + { + name: "basic", + input: []node{ + call{text: "0"}, + result{text: "0"}, + }, + output: paths{ + { + calls: []call{{text: "0"}}, + result: result{text: "0"}, + }, + }, + describe: []string{ + "0 > 0", + }, + }, + { + name: "open", + input: []node{ + call{text: "0"}, + }, + panic: true, + }, + { + name: "open fork", + input: []node{ + call{text: "0"}, + oneOf{ + allOf{call{text: "1"}}, + allOf{call{text: "1"}, result{text: "1"}}, + }, + }, + panic: true, + }, + { + name: 
"split", + input: []node{ + call{text: "0"}, + oneOf{ + allOf{call{text: "1"}}, + allOf{call{text: "2"}}, + }, + result{text: "4"}, + }, + output: paths{ + { + calls: []call{ + {text: "0"}, + {text: "1"}, + }, + result: result{text: "4"}, + }, + { + calls: []call{ + {text: "0"}, + {text: "2"}, + }, + result: result{text: "4"}, + }, + }, + describe: []string{ + "0 > 1 > 4", + "0 > 2 > 4", + }, + }, + { + name: "recursive", + input: []node{ + oneOf{ + allOf{oneOf{ + allOf{call{text: "0"}}, + allOf{oneOf{ + allOf{call{text: "1"}}, + allOf{call{text: "2"}}, + }}, + }}, + allOf{oneOf{ + allOf{call{text: "3"}}, + allOf{oneOf{ + allOf{call{text: "4"}}, + allOf{call{text: "5"}}, + }}, + }}, + }, + result{text: "6"}, + }, + output: paths{ + { + calls: []call{ + {text: "0"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "1"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "2"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "3"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "4"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "5"}, + }, + result: result{text: "6"}, + }, + }, + describe: []string{ + "0 > 6", + "1 > 6", + "2 > 6", + "3 > 6", + "4 > 6", + "5 > 6", + }, + }, + { + name: "close order", + input: []node{ + call{text: "0"}, + oneOf{ + allOf{call{text: "1"}}, + allOf{result{text: "2"}}, + }, + result{text: "3"}, + }, + output: paths{ + { + calls: []call{ + {text: "0"}, + }, + result: result{text: "2"}, + }, + { + calls: []call{ + {text: "0"}, + {text: "1"}, + }, + result: result{text: "3"}, + }, + }, + describe: []string{ + "0 > 2", + "0 > 1 > 3", + }, + }, + { + name: "path order", + input: []node{ + oneOf{ + allOf{call{text: "0"}, result{text: "0"}}, + allOf{call{text: "1"}}, + }, + oneOf{ + allOf{call{text: "2"}, result{text: "2"}}, + allOf{call{text: "3"}, result{text: "3"}}, + }, + }, + output: paths{ + { + calls: []call{{text: "0"}}, + result: 
result{text: "0"}, + }, + { + calls: []call{ + {text: "1"}, + {text: "2"}, + }, + result: result{text: "2"}, + }, + { + calls: []call{ + {text: "1"}, + {text: "3"}, + }, + result: result{text: "3"}, + }, + }, + describe: []string{ + "0 > 0", + "1 > 2 > 2", + "1 > 3 > 3", + }, + }, + { + name: "once", + input: []node{ + once{text: "0"}, + oneOf{ + allOf{call{text: "1"}, result{text: "1"}}, + allOf{call{text: "1"}}, + }, + oneOf{ + allOf{call{text: "2"}, result{text: "2"}}, + allOf{call{text: "2"}}, + }, + result{text: "3"}, + once{text: "4"}, + oneOf{ + allOf{call{text: "5"}, result{text: "5"}}, + allOf{call{text: "5"}}, + }, + oneOf{ + allOf{call{text: "6"}, result{text: "6"}}, + allOf{call{text: "7"}, result{text: "7"}}, + }, + }, + output: paths{ + { + once: []*once{{text: "0"}}, + calls: []call{{text: "1"}}, + result: result{text: "1"}, + }, + { + once: []*once{{text: "0"}}, + calls: []call{ + {text: "1"}, + {text: "2"}, + }, + result: result{text: "2"}, + }, + { + once: []*once{{text: "0"}}, + calls: []call{ + {text: "1"}, + {text: "2"}, + }, + result: result{text: "3"}, + }, + { + once: []*once{{text: "4"}}, + calls: []call{{text: "5"}}, + result: result{text: "5"}, + }, + { + once: []*once{{text: "4"}}, + calls: []call{ + {text: "5"}, + {text: "6"}, + }, + result: result{text: "6"}, + }, + { + once: []*once{{text: "4"}}, + calls: []call{ + {text: "5"}, + {text: "7"}, + }, + result: result{text: "7"}, + }, + }, + describe: []string{ + "0 > 1 > 1", + "1 > 2 > 2", + "1 > 2 > 3", + "4 > 5 > 5", + "5 > 6 > 6", + "5 > 7 > 7", + }, + }, + { + name: "no shared state", + input: []node{ + call{text: "mock1"}, + oneOf{ + allOf{call{text: "mock1.1"}, result{text: "result1"}}, + allOf{call{text: "mock2"}}, + }, + call{text: "mock3"}, + oneOf{ + allOf{call{text: "mock3.1"}, result{text: "result2"}}, + allOf{call{text: "mock3.2"}, result{text: "result3"}}, + }, + }, + output: paths{ + { + calls: []call{ + {text: "mock1"}, + {text: "mock1.1"}, + }, + result: result{text: 
"result1"}, + }, + { + calls: []call{ + {text: "mock1"}, + {text: "mock2"}, + {text: "mock3"}, + {text: "mock3.1"}, + }, + result: result{text: "result2"}, + }, + { + calls: []call{ + {text: "mock1"}, + {text: "mock2"}, + {text: "mock3"}, + {text: "mock3.2"}, + }, + result: result{text: "result3"}, + }, + }, + describe: []string{ + "mock1 > mock1.1 > result1", + "mock1 > mock2 > mock3 > mock3.1 > result2", + "mock1 > mock2 > mock3 > mock3.2 > result3", + }, + }, + { + name: "docs", + input: []node{ + oneOf{ + allOf{ + call{text: "instance exists and is not offline"}, + result{text: "success"}, + }, + allOf{ + call{text: "instance does not exist"}, + oneOf{ + allOf{call{text: "able to be created"}}, + allOf{ + call{text: "not able to be created"}, + result{text: "error"}, + }, + }, + }, + allOf{call{text: "instance exists but is offline"}}, + }, + oneOf{ + allOf{ + call{text: "able to boot"}, + result{text: "success"}, + }, + allOf{ + call{text: "not able to boot"}, + result{text: "error"}, + }, + }, + }, + output: paths{ + { + calls: []call{{text: "instance exists and is not offline"}}, + result: result{text: "success"}, + }, + { + calls: []call{ + {text: "instance does not exist"}, + {text: "not able to be created"}, + }, + result: result{text: "error"}, + }, + { + calls: []call{ + {text: "instance does not exist"}, + {text: "able to be created"}, + {text: "able to boot"}, + }, + result: result{text: "success"}, + }, + { + calls: []call{ + {text: "instance does not exist"}, + {text: "able to be created"}, + {text: "not able to boot"}, + }, + result: result{text: "error"}, + }, + { + calls: []call{ + {text: "instance exists but is offline"}, + {text: "able to boot"}, + }, + result: result{text: "success"}, + }, + { + calls: []call{ + {text: "instance exists but is offline"}, + {text: "not able to boot"}, + }, + result: result{text: "error"}, + }, + }, + describe: []string{ + "instance exists and is not offline > success", + "instance does not exist > not able to be 
created > error", + "instance does not exist > able to be created > able to boot > success", + "instance does not exist > able to be created > not able to boot > error", + "instance exists but is offline > able to boot > success", + "instance exists but is offline > not able to boot > error", + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + if testCase.panic { + assert.Panics(t, func() { + mkPaths(testCase.input...) + }) + return + } + + actual := mkPaths(testCase.input...) + assert.Equal(t, testCase.output, actual) + assert.Equal(t, testCase.describe, actual.describe()) + assert.Equal(t, DescribePaths(testCase.input...), actual.describe()) + }) + } +} + +func TestRunWithoutTestReporter(t *testing.T) { + t.Parallel() + + pth := path{} + assert.Panics(t, func() { + pth.run(context.Background(), Mock{}) + }) +} + +func TestEvalOnceOnlyCallsOnce(t *testing.T) { + t.Parallel() + + var toggle bool + + onceFn := once{does: func(_ context.Context, _ Mock) { + toggle = !toggle + }} + + ctx := context.Background() + mck := Mock{} + evalOnce(ctx, mck, &onceFn) + evalOnce(ctx, mck, &onceFn) + + assert.True(t, toggle) +} diff --git a/mock/mocktest/suite.go b/mock/mocktest/suite.go new file mode 100644 index 000000000..9b0605371 --- /dev/null +++ b/mock/mocktest/suite.go @@ -0,0 +1,144 @@ +package mocktest + +import ( + "bytes" + "context" + "errors" + "testing" + + "github.com/onsi/ginkgo/v2" + "go.uber.org/mock/gomock" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/linode/cluster-api-provider-linode/mock" +) + +type suite struct { + clients []mock.MockClient + beforeEach []fn + afterEach []fn +} + +func (s *suite) BeforeEach(action func(context.Context, Mock)) { + s.beforeEach = append(s.beforeEach, fn{ + text: "BeforeEach", + does: action, + }) +} + +func (s *suite) AfterEach(action func(context.Context, Mock)) { + s.afterEach = append(s.afterEach, fn{ + text: "AfterEach", + does: action, + }) +} 
+ +type mockOpt func(*Mock) + +func (s *suite) run(t gomock.TestHelper, ctx context.Context, pth path, mockOpts ...mockOpt) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mck := Mock{ + TestReporter: t, + } + + for _, client := range s.clients { + mck.MockClients.Build(client, mockCtrl) + } + + for _, opt := range mockOpts { + opt(&mck) + } + + for _, fun := range s.beforeEach { + evalFn(ctx, mck, fun) + } + + pth.run(ctx, mck) + + for _, fun := range s.afterEach { + evalFn(ctx, mck, fun) + } + + // If a recorder is configured and events were not consumed, flush the channel + if mck.recorder != nil { + for len(mck.recorder.Events) > 0 { + <-mck.recorder.Events + } + } +} + +type standardSuite struct { + suite + + t *testing.T +} + +// NewSuite creates a test suite using Go's standard testing library. +// It generates new mock clients for each test path it runs. +func NewSuite(t *testing.T, clients ...mock.MockClient) *standardSuite { + t.Helper() + + if len(clients) == 0 { + panic(errors.New("unable to run tests without clients")) + } + + return &standardSuite{ + suite: suite{clients: clients}, + t: t, + } +} + +// Run calls t.Run for each computed test path. +func (ss *standardSuite) Run(nodes ...node) { + pths := mkPaths(nodes...) + + for _, pth := range pths { + ss.t.Run(pth.describe(), func(t *testing.T) { + t.Parallel() + + ss.suite.run(t, context.Background(), pth) + }) + } +} + +const recorderBufferSize = 20 + +type ctlrSuite struct { + suite + + ginkgoT ginkgo.FullGinkgoTInterface +} + +// NewControllerSuite creates a test suite for a controller. +// It generates new mock clients for each test path it runs. 
+func NewControllerSuite(ginkgoT ginkgo.FullGinkgoTInterface, clients ...mock.MockClient) *ctlrSuite { + if len(clients) == 0 { + panic(errors.New("unable to run tests without clients")) + } + + return &ctlrSuite{ + suite: suite{clients: clients}, + ginkgoT: ginkgoT, + } +} + +// Run executes Ginkgo test specs for each computed test path. +// It manages mock client instantiation, events, and logging. +func (cs *ctlrSuite) Run(nodes ...node) { + pths := mkPaths(nodes...) + + for _, pth := range pths { + ginkgo.It(pth.describe(), func(ctx ginkgo.SpecContext) { + cs.suite.run(cs.ginkgoT, ctx, pth, func(mck *Mock) { + // Create a recorder with a buffered channel for consuming event strings. + mck.recorder = record.NewFakeRecorder(recorderBufferSize) + // Create a logger that writes to both GinkgoWriter and the local logs buffer + mck.logs = &bytes.Buffer{} + mck.logger = zap.New(zap.WriteTo(ginkgo.GinkgoWriter), zap.WriteTo(mck.logs)) + }) + }) + } +} diff --git a/mock/mocktest/suite_test.go b/mock/mocktest/suite_test.go new file mode 100644 index 000000000..e411a69a1 --- /dev/null +++ b/mock/mocktest/suite_test.go @@ -0,0 +1,136 @@ +package mocktest + +import ( + "context" + "errors" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/linode/cluster-api-provider-linode/mock" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestSuitesNoClients(t *testing.T) { + t.Parallel() + + assert.Panics(t, func() { NewSuite(t) }) + assert.Panics(t, func() { NewControllerSuite(GinkgoT()) }) +} + +func TestSuite(t *testing.T) { + t.Parallel() + + //nolint:paralleltest // these tests should run prior to their nested t.Run + for _, testCase := range []struct { + name string + beforeEach, afterEach bool + expectedCount int + }{ + { + name: "beforeEach", + beforeEach: true, + expectedCount: 6, + }, + { + name: "afterEach", + afterEach: true, + expectedCount: 6, + }, + { + name: "both", + beforeEach: true, + afterEach: true, + expectedCount: 8, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + // Create a counter with the expected number of calls. + // As each call runs, the counter will decrement to 0. + var counter sync.WaitGroup + counter.Add(testCase.expectedCount) + + suite := NewSuite(t, mock.MockK8sClient{}) + if testCase.beforeEach { + suite.BeforeEach(func(_ context.Context, _ Mock) { counter.Done() }) + } + if testCase.afterEach { + suite.AfterEach(func(_ context.Context, _ Mock) { counter.Done() }) + } + + suite.Run( + OneOf( + Path(Call("", func(_ context.Context, _ Mock) { counter.Done() })), + Path(Call("", func(_ context.Context, _ Mock) { counter.Done() })), + ), + Result("", func(_ context.Context, _ Mock) { counter.Done() }), + ) + + // Wait until the counter reaches 0, or time out. + // This runs in a goroutine so the nested t.Runs are scheduled. 
+ go func() { + select { + case <-waitCh(&counter): + return + case <-time.NewTimer(time.Second * 5).C: + assert.Error(t, errors.New(testCase.name)) + } + }() + }) + } +} + +func waitCh(counter *sync.WaitGroup) <-chan struct{} { + out := make(chan struct{}) + go func() { + counter.Wait() + out <- struct{}{} + }() + return out +} + +var _ = Describe("controller suite", Label("suite"), func() { + suite := NewControllerSuite(GinkgoT(), mock.MockK8sClient{}) + + suite.Run( + Call("call", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(nil) + }), + Result("result", func(ctx context.Context, mck Mock) { + mck.recorder.Event(nil, "", "", "event") + err := mck.K8sClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "myobj"}, nil) + Expect(err).NotTo(HaveOccurred()) + }), + ) +}) + +var _ = Describe("controller suite with events/logs", Label("suite"), func() { + suite := NewControllerSuite(GinkgoT(), mock.MockK8sClient{}) + + suite.Run( + OneOf( + Path(Call("call1", func(_ context.Context, mck Mock) { + mck.Recorder().Event(nil, "", "", "+") + mck.Logger().Info("+") + })), + Path(Call("call2", func(_ context.Context, mck Mock) { + mck.Recorder().Event(nil, "", "", "+") + mck.Logger().Info("+") + })), + ), + Result("result", func(_ context.Context, mck Mock) { + mck.Recorder().Event(nil, "", "", "+") + mck.Logger().Info("+") + + Expect(strings.Count(mck.Events(), "+")).To(Equal(2)) + Expect(strings.Count(mck.Logs(), "+")).To(Equal(2)) + }), + ) +}) From 4e12116fb76b9950e8e9285ad2df9b1dc6457e39 Mon Sep 17 00:00:00 2001 From: amold1 Date: Fri, 26 Apr 2024 12:03:17 -0400 Subject: [PATCH 3/3] adapt linodecluster controller tests to new mocktest changes (#277) * adapt linodecluster controller tests to new mocktest changes --- cloud/scope/cluster.go | 4 +- controller/linodecluster_controller.go | 1 + controller/linodecluster_controller_test.go | 399 +++++++++----------- 3 files changed, 190 insertions(+), 214 
deletions(-) diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 0573bf89c..7769df967 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -72,7 +72,7 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara } return &ClusterScope{ - client: params.Client, + Client: params.Client, Cluster: params.Cluster, LinodeClient: linodeClient, LinodeCluster: params.LinodeCluster, @@ -82,7 +82,7 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara // ClusterScope defines the basic context for an actuator to operate upon. type ClusterScope struct { - client K8sClient + Client K8sClient PatchHelper *patch.Helper LinodeClient LinodeNodeBalancerClient Cluster *clusterv1.Cluster diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go index 18813b743..1b9763c3e 100644 --- a/controller/linodecluster_controller.go +++ b/controller/linodecluster_controller.go @@ -192,6 +192,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo if clusterScope.LinodeCluster.Spec.Network.NodeBalancerID == nil { logger.Info("NodeBalancer ID is missing, nothing to do") controllerutil.RemoveFinalizer(clusterScope.LinodeCluster, infrav1alpha1.GroupVersion.String()) + r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, "NodeBalancerIDMissing", "NodeBalancer ID is missing, nothing to do") return nil } diff --git a/controller/linodecluster_controller_test.go b/controller/linodecluster_controller_test.go index 2bc495268..c0ac836be 100644 --- a/controller/linodecluster_controller_test.go +++ b/controller/linodecluster_controller_test.go @@ -17,24 +17,32 @@ package controller import ( - corev1 "k8s.io/api/core/v1" + "context" + "errors" + + "github.com/go-logr/logr" + "github.com/linode/linodego" + "go.uber.org/mock/gomock" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" clusterv1 
"sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" + "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/mock" + . "github.com/linode/cluster-api-provider-linode/mock/mocktest" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -var _ = Describe("lifecycle", Ordered, Label("cluster", "lifecycle"), func() { - var reconciler *LinodeClusterReconciler +var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecycle"), func() { nodebalancerID := 1 controlPlaneEndpointHost := "10.0.0.1" - clusterName := "lifecycle" + controlPlaneEndpointPort := 6443 + clusterName := "cluster-lifecycle" clusterNameSpace := "default" ownerRef := metav1.OwnerReference{ Name: clusterName, @@ -49,177 +57,141 @@ var _ = Describe("lifecycle", Ordered, Label("cluster", "lifecycle"), func() { OwnerReferences: ownerRefs, } - caplCluster := clusterv1.Cluster{ - ObjectMeta: metadata, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "LinodeCluster", - Name: clusterName, - Namespace: clusterNameSpace, - }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Name: "lifecycle-control-plane", - Namespace: clusterNameSpace, - }, - }, - } - linodeCluster := infrav1.LinodeCluster{ ObjectMeta: metadata, Spec: infrav1.LinodeClusterSpec{ Region: "us-ord", - Network: infrav1.NetworkSpec{ - NodeBalancerID: &nodebalancerID, - }, - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: controlPlaneEndpointHost, - }, }, } - BeforeEach(func() { - reconciler = &LinodeClusterReconciler{ - Client: k8sClient, - LinodeApiKey: "test-key", - } - }) + ctlrSuite := NewControllerSuite(GinkgoT(), mock.MockLinodeNodeBalancerClient{}) + reconciler := LinodeClusterReconciler{} - It("should provision a 
control plane endpoint", func(ctx SpecContext) { - clusterKey := client.ObjectKeyFromObject(&linodeCluster) - Expect(k8sClient.Create(ctx, &caplCluster)).To(Succeed()) - Expect(k8sClient.Create(ctx, &linodeCluster)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: clusterKey, - }) - Expect(err).NotTo(HaveOccurred()) - - By("checking ready conditions") - Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - Expect(linodeCluster.Status.Ready).To(BeTrue()) - Expect(linodeCluster.Status.Conditions).To(HaveLen(1)) - Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) - - By("checking nb id") - Expect(linodeCluster.Spec.Network.NodeBalancerID).To(Equal(&nodebalancerID)) - - By("checking controlPlaneEndpoint host") - Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal(controlPlaneEndpointHost)) - }) -}) - -var _ = Describe("no-capl-cluster", Ordered, Label("cluster", "no-capl-cluster"), func() { - var reconciler *LinodeClusterReconciler - nodebalancerID := 1 - controlPlaneEndpointHost := "10.0.0.1" - clusterName := "no-capl-cluster" - clusterNameSpace := "default" - metadata := metav1.ObjectMeta{ - Name: clusterName, - Namespace: clusterNameSpace, - } - - linodeCluster := infrav1.LinodeCluster{ - ObjectMeta: metadata, - Spec: infrav1.LinodeClusterSpec{ - Region: "us-ord", - Network: infrav1.NetworkSpec{ - NodeBalancerID: &nodebalancerID, - }, - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: controlPlaneEndpointHost, - }, - }, + cScope := &scope.ClusterScope{ + LinodeCluster: &linodeCluster, } - BeforeEach(func() { - reconciler = &LinodeClusterReconciler{ - Client: k8sClient, - LinodeApiKey: "test-key", - } - }) - - It("should fail reconciliation if no capl cluster exists", func(ctx SpecContext) { - clusterKey := client.ObjectKeyFromObject(&linodeCluster) + BeforeAll(func(ctx SpecContext) { + cScope.Client = k8sClient Expect(k8sClient.Create(ctx, 
&linodeCluster)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: clusterKey, - }) - Expect(err).NotTo(HaveOccurred()) - - By("checking ready conditions") - Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - Expect(linodeCluster.Status.Ready).To(BeFalseBecause("failed to get Cluster/no-capl-cluster: clusters.cluster.x-k8s.io \"no-capl-cluster\" not found")) }) -}) - -var _ = Describe("no-owner-ref", Ordered, Label("cluster", "no-owner-ref"), func() { - var reconciler *LinodeClusterReconciler - nodebalancerID := 1 - controlPlaneEndpointHost := "10.0.0.1" - clusterName := "no-owner-ref" - clusterNameSpace := "default" - metadata := metav1.ObjectMeta{ - Name: clusterName, - Namespace: clusterNameSpace, - } - - caplCluster := clusterv1.Cluster{ - ObjectMeta: metadata, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "LinodeCluster", - Name: clusterName, - Namespace: clusterNameSpace, - }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Name: "lifecycle-control-plane", - Namespace: clusterNameSpace, - }, - }, - } - linodeCluster := infrav1.LinodeCluster{ - ObjectMeta: metadata, - Spec: infrav1.LinodeClusterSpec{ - Region: "us-ord", - Network: infrav1.NetworkSpec{ - NodeBalancerID: &nodebalancerID, - }, - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: controlPlaneEndpointHost, - }, - }, - } - - BeforeEach(func() { - reconciler = &LinodeClusterReconciler{ - Client: k8sClient, - LinodeApiKey: "test-key", - } - }) + ctlrSuite.BeforeEach(func(ctx context.Context, mck Mock) { + reconciler.Recorder = mck.Recorder() + clusterKey := client.ObjectKey{Name: "cluster-lifecycle", Namespace: "default"} + Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - It("linode cluster should remain NotReady", func(ctx SpecContext) { - clusterKey := client.ObjectKeyFromObject(&linodeCluster) - Expect(k8sClient.Create(ctx, 
&caplCluster)).To(Succeed()) - Expect(k8sClient.Create(ctx, &linodeCluster)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: clusterKey, - }) + // Create patch helper with latest state of resource. + // This is only needed when relying on envtest's k8sClient. + patchHelper, err := patch.NewHelper(&linodeCluster, k8sClient) Expect(err).NotTo(HaveOccurred()) - - By("checking ready conditions") - Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - Expect(linodeCluster.Status.Ready).To(BeFalse()) - Expect(linodeCluster.Status.FailureMessage).To(BeNil()) - Expect(linodeCluster.Status.FailureReason).To(BeNil()) + cScope.PatchHelper = patchHelper }) + + ctlrSuite.Run( + OneOf( + Path( + Call("cluster is created", func(ctx context.Context, mck Mock) { + cScope.LinodeClient = mck.NodeBalancerClient + getNB := mck.NodeBalancerClient.EXPECT().ListNodeBalancers(gomock.Any(), gomock.Any()).Return(nil, nil) + mck.NodeBalancerClient.EXPECT().CreateNodeBalancer(gomock.Any(), gomock.Any()). + After(getNB). + Return(&linodego.NodeBalancer{ + ID: nodebalancerID, + IPv4: &controlPlaneEndpointHost, + }, nil) + mck.NodeBalancerClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).After(getNB).Return(&linodego.NodeBalancerConfig{ + Port: controlPlaneEndpointPort, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + NodeBalancerID: nodebalancerID, + }, nil) + }), + ), + Path( + Call("cluster is not created because there was an error creating nb", func(ctx context.Context, mck Mock) { + cScope.LinodeClient = mck.NodeBalancerClient + getNB := mck.NodeBalancerClient.EXPECT().ListNodeBalancers(gomock.Any(), gomock.Any()).Return(nil, nil) + mck.NodeBalancerClient.EXPECT().CreateNodeBalancer(gomock.Any(), gomock.Any()). + After(getNB). 
+ Return(nil, errors.New("create NB error")) + }), + Result("create nb error", func(ctx context.Context, mck Mock) { + _, err := reconciler.reconcile(ctx, cScope, logr.Logger{}) + Expect(err.Error()).To(ContainSubstring("create NB error")) + }), + ), + Path( + Call("cluster is not created because nb was nil", func(ctx context.Context, mck Mock) { + cScope.LinodeClient = mck.NodeBalancerClient + getNB := mck.NodeBalancerClient.EXPECT().ListNodeBalancers(gomock.Any(), gomock.Any()).Return(nil, nil) + mck.NodeBalancerClient.EXPECT().CreateNodeBalancer(gomock.Any(), gomock.Any()). + After(getNB). + Return(nil, nil) + }), + Result("created nb is nil", func(ctx context.Context, mck Mock) { + _, err := reconciler.reconcile(ctx, cScope, logr.Logger{}) + Expect(err.Error()).To(ContainSubstring("nodeBalancer created was nil")) + }), + ), + Path( + Call("cluster is not created because nb config was nil", func(ctx context.Context, mck Mock) { + cScope.LinodeClient = mck.NodeBalancerClient + getNB := mck.NodeBalancerClient.EXPECT().ListNodeBalancers(gomock.Any(), gomock.Any()).Return(nil, nil) + mck.NodeBalancerClient.EXPECT().CreateNodeBalancer(gomock.Any(), gomock.Any()). + After(getNB). + Return(&linodego.NodeBalancer{ + ID: nodebalancerID, + IPv4: &controlPlaneEndpointHost, + }, nil) + mck.NodeBalancerClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()). + After(getNB). 
+ Return(nil, errors.New("nodeBalancer config created was nil")) + }), + Result("created nb config is nil", func(ctx context.Context, mck Mock) { + _, err := reconciler.reconcile(ctx, cScope, logr.Logger{}) + Expect(err.Error()).To(ContainSubstring("nodeBalancer config created was nil")) + }), + ), + Path( + Call("cluster is not created because there is no capl cluster", func(ctx context.Context, mck Mock) { + cScope.LinodeClient = mck.NodeBalancerClient + }), + Result("no capl cluster error", func(ctx context.Context, mck Mock) { + reconciler.Client = k8sClient + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(cScope.LinodeCluster), + }) + Expect(err).NotTo(HaveOccurred()) + Expect(linodeCluster.Status.Ready).To(BeFalseBecause("failed to get Cluster/cluster-lifecycle: clusters.cluster.x-k8s.io \"cluster-lifecycle\" not found")) + }), + ), + ), + Result("resource status is updated and NB is created", func(ctx context.Context, mck Mock) { + _, err := reconciler.reconcile(ctx, cScope, logr.Logger{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking ready conditions") + clusterKey := client.ObjectKeyFromObject(&linodeCluster) + Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) + Expect(linodeCluster.Status.Ready).To(BeTrue()) + Expect(linodeCluster.Status.Conditions).To(HaveLen(1)) + Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + + By("checking NB id") + Expect(linodeCluster.Spec.Network.NodeBalancerID).To(Equal(&nodebalancerID)) + + By("checking controlPlaneEndpoint/NB host and port") + Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal(controlPlaneEndpointHost)) + Expect(linodeCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(int32(controlPlaneEndpointPort))) + }), + ) }) -var _ = Describe("no-ctrl-plane-endpt", Ordered, Label("cluster", "no-ctrl-plane-endpt"), func() { - var reconciler *LinodeClusterReconciler - clusterName := "no-ctrl-plane-endpt"
+var _ = Describe("cluster-delete", Ordered, Label("cluster", "cluster-delete"), func() { + nodebalancerID := 1 + clusterName := "cluster-delete" clusterNameSpace := "default" ownerRef := metav1.OwnerReference{ Name: clusterName, @@ -234,69 +206,72 @@ var _ = Describe("no-ctrl-plane-endpt", Ordered, Label("cluster", "no-ctrl-plane OwnerReferences: ownerRefs, } - caplCluster := clusterv1.Cluster{ - ObjectMeta: metadata, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "LinodeCluster", - Name: clusterName, - Namespace: clusterNameSpace, - }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Name: "lifecycle-control-plane", - Namespace: clusterNameSpace, - }, - }, - } - linodeCluster := infrav1.LinodeCluster{ ObjectMeta: metadata, Spec: infrav1.LinodeClusterSpec{ Region: "us-ord", + Network: infrav1.NetworkSpec{ + NodeBalancerID: &nodebalancerID, + }, }, } - // Create a recorder with a buffered channel for consuming event strings. - recorder := record.NewFakeRecorder(10) + ctlrSuite := NewControllerSuite( + GinkgoT(), + mock.MockLinodeNodeBalancerClient{}, + mock.MockK8sClient{}, + ) + reconciler := LinodeClusterReconciler{} - BeforeEach(func() { - reconciler = &LinodeClusterReconciler{ - Client: k8sClient, - Recorder: recorder, - LinodeApiKey: "test-key", - } - }) + cScope := &scope.ClusterScope{ + LinodeCluster: &linodeCluster, + } - AfterEach(func() { - // Flush the channel if any events were not consumed. 
- for len(recorder.Events) > 0 { - <-recorder.Events - } + ctlrSuite.BeforeEach(func(ctx context.Context, mck Mock) { + reconciler.Recorder = mck.Recorder() }) - It("should fail creating cluster", func(ctx SpecContext) { - clusterKey := client.ObjectKeyFromObject(&linodeCluster) - Expect(k8sClient.Create(ctx, &caplCluster)).To(Succeed()) - Expect(k8sClient.Create(ctx, &linodeCluster)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: clusterKey, - }) - Expect(err).To(HaveOccurred()) - - By("checking ready conditions") - Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - Expect(linodeCluster.Status.Ready).To(BeFalse()) - Expect(linodeCluster.Status.Conditions).To(HaveLen(1)) - Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) - - By("checking controlPlaneEndpoint host") - Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal("")) - - By("checking nb id to be nil") - Expect(linodeCluster.Spec.Network.NodeBalancerID).To(BeNil()) - - By("recording the expected events") - Expect(<-recorder.Events).To(ContainSubstring("Warning CreateError")) - }) + ctlrSuite.Run( + OneOf( + Path( + Call("cluster is deleted", func(ctx context.Context, mck Mock) { + cScope.LinodeClient = mck.NodeBalancerClient + cScope.Client = mck.K8sClient + mck.NodeBalancerClient.EXPECT().DeleteNodeBalancer(gomock.Any(), gomock.Any()).Return(nil) + }), + ), + Path( + Call("nothing to do because NB ID is nil", func(ctx context.Context, mck Mock) { + cScope.Client = mck.K8sClient + cScope.LinodeClient = mck.NodeBalancerClient + cScope.LinodeCluster.Spec.Network.NodeBalancerID = nil + }), + Result("nothing to do because NB ID is nil", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient + err := reconciler.reconcileDelete(ctx, logr.Logger{}, cScope) + Expect(err).NotTo(HaveOccurred()) + Expect(mck.Events()).To(ContainSubstring("Warning NodeBalancerIDMissing NodeBalancer ID is missing, 
nothing to do")) + }), + ), + Path( + Call("cluster not deleted because the nb can't be deleted", func(ctx context.Context, mck Mock) { + cScope.LinodeClient = mck.NodeBalancerClient + cScope.Client = mck.K8sClient + cScope.LinodeCluster.Spec.Network.NodeBalancerID = &nodebalancerID + mck.NodeBalancerClient.EXPECT().DeleteNodeBalancer(gomock.Any(), gomock.Any()).Return(errors.New("delete NB error")) + }), + Result("cluster not deleted because the nb can't be deleted", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient + err := reconciler.reconcileDelete(ctx, logr.Logger{}, cScope) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("delete NB error")) + }), + ), + ), + Result("cluster deleted", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient + err := reconciler.reconcileDelete(ctx, logr.Logger{}, cScope) + Expect(err).NotTo(HaveOccurred()) + }), + ) })