test: Add clusterctl upgrade e2e test #138

Merged 1 commit on Aug 30, 2024
8 changes: 2 additions & 6 deletions test/e2e/README.md
@@ -14,12 +14,6 @@ To run a specific e2e test, such as `[PR-Blocking]`, use the `GINKGO_FOCUS` environment variable:
```shell
make GINKGO_FOCUS="\\[PR-Blocking\\]" test-e2e # only run e2e test with `[PR-Blocking]` in its spec name
```
### Run the e2e test with tilt
It is quite useful to run the e2e tests with [tilt](https://cluster-api.sigs.k8s.io/developer/tilt), so that you do not need to rebuild the docker image with `make docker-build-e2e` every time, nor wait for a new cluster to be created and set up. If you have set up your tilt cluster and pointed the current context at it, you can run:
```shell
# running e2e for the cluster pointed by the current context
make USE_EXISTING_CLUSTER=true test-e2e
```
## Develop an e2e test
Refer to [Developing E2E tests](https://cluster-api.sigs.k8s.io/developer/e2e) for a complete guide to developing e2e tests.

@@ -32,3 +26,5 @@ A guide for developing a k3s e2e test:

## Troubleshooting
* [Cluster API with Docker - "too many open files".](https://cluster-api.sigs.k8s.io/user/troubleshooting.html?highlight=too%20many#cluster-api-with-docker----too-many-open-files)
* Invalid provider metadata
  * If you see the error `invalid provider metadata: version v1.8.99 for the provider capd-system/infrastructure-docker does not match any release series`, the artifacts you are using may be outdated. Remove the `_artifacts` folder and try again.
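
    For example, a minimal cleanup (a sketch, assuming you run it from the repository root):
    ```shell
    # remove stale e2e artifacts so fresh provider manifests are used on the next run
    rm -rf _artifacts
    make test-e2e
    ```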
164 changes: 164 additions & 0 deletions test/e2e/clusterctl_upgrade_test.go
@@ -0,0 +1,164 @@
//go:build e2e
// +build e2e

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"fmt"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gstruct"

"k8s.io/utils/ptr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
e2eOldLabelName = "Cluster.topology.controlPlane.oldLabel"
e2eOldAnnotationName = "Cluster.topology.controlPlane.oldAnnotation"
e2eNewAnnotationValue = "newAnnotationValue"
kcpManagerName = "capi-kthreescontrolplane"
)
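// Note: e2eOldLabelName and e2eOldAnnotationName match the metadata seeded in the
// cluster-template's Cluster.spec.topology.controlPlane (see the template change
// further down in this PR), so the PostUpgrade hook below can mutate them.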

var (
clusterctlDownloadURL = "https://github.com/kubernetes-sigs/cluster-api/releases/download/v%s/clusterctl-{OS}-{ARCH}"
providerCAPIPrefix = "cluster-api:v%s"
providerKThreesPrefix = "k3s:v%s"
providerDockerPrefix = "docker:v%s"
)
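// For example, with capiCoreVersion = "1.8.1", fmt.Sprintf(clusterctlDownloadURL, capiCoreVersion)
// yields "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.8.1/clusterctl-{OS}-{ARCH}";
// the {OS} and {ARCH} placeholders are expected to be resolved by the upstream CAPI
// test framework when it downloads the binary.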

var _ = Describe("When testing clusterctl upgrades using ClusterClass (v0.2.0=>current) [ClusterClass]", func() {
// Upgrade from v0.2.0 to current (the current version is built from source).
var (
specName = "clusterctl-upgrade"
version = "0.2.0"
k3sCapiUpgradedVersion string
capiCoreVersion string
capiCoreUpgradedVersion string
)
BeforeEach(func() {
Expect(e2eConfig.Variables).To(HaveKey(K3sCapiCurrentVersion))
Expect(e2eConfig.Variables).To(HaveKey(CapiCoreVersion))

// Will upgrade k3s CAPI from v0.2.0 to k3sCapiUpgradedVersion.
k3sCapiUpgradedVersion = e2eConfig.GetVariable(K3sCapiCurrentVersion)

// Will init the other CAPI core/CAPD components with capiCoreVersion, and then upgrade to capiCoreUpgradedVersion.
// For now, these two versions are equal.
capiCoreVersion = e2eConfig.GetVariable(CapiCoreVersion)
capiCoreUpgradedVersion = capiCoreVersion
})

capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
return capi_e2e.ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InfrastructureProvider: ptr.To("docker"),
InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, capiCoreVersion),
InitWithCoreProvider: fmt.Sprintf(providerCAPIPrefix, capiCoreVersion),
InitWithBootstrapProviders: []string{fmt.Sprintf(providerKThreesPrefix, version)},
InitWithControlPlaneProviders: []string{fmt.Sprintf(providerKThreesPrefix, version)},
InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, capiCoreVersion)},
InitWithProvidersContract: "v1beta1",
// InitWithKubernetesVersion is for the management cluster; WorkloadKubernetesVersion is for the workload cluster.
// The versions are hardcoded because later k3s releases might not be compatible with older versions of CAPI k3s.
InitWithKubernetesVersion: "v1.30.0",
WorkloadKubernetesVersion: "v1.30.2+k3s2",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
UseKindForManagementCluster: true,
// Configuration for the provider upgrades.
Upgrades: []capi_e2e.ClusterctlUpgradeSpecInputUpgrade{
{
// CAPI core or CAPD with compatible version.
CoreProvider: fmt.Sprintf(providerCAPIPrefix, capiCoreUpgradedVersion),
InfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, capiCoreUpgradedVersion)},
// Upgrade to current k3s.
BootstrapProviders: []string{fmt.Sprintf(providerKThreesPrefix, k3sCapiUpgradedVersion)},
ControlPlaneProviders: []string{fmt.Sprintf(providerKThreesPrefix, k3sCapiUpgradedVersion)},
},
},
// After the k3s CAPI upgrade, verify that the in-place mutable fields
// can be updated correctly. This complements inplace_rollout_test by
// covering the k3s CAPI upgrade scenario.
// We test upgrading from v0.2.0 because SSA is not supported
// before v0.2.0.
PostUpgrade: func(managementClusterProxy framework.ClusterProxy, clusterNamespace, clusterName string) {
clusterList := &clusterv1.ClusterList{}
mgmtClient := managementClusterProxy.GetClient()

Expect(mgmtClient.List(ctx, clusterList, client.InNamespace(clusterNamespace))).To(Succeed())
Expect(clusterList.Items).To(HaveLen(1), fmt.Sprintf("Expected to have only one cluster in the namespace %s", clusterNamespace))

cluster := &clusterList.Items[0]

Byf("Waiting the new controller to reconcile at least once, to set the managed fields with k3s kcpManagerName for all control plane machines.")
Eventually(func(g Gomega) {
controlPlaneMachineList := &clusterv1.MachineList{}
g.Expect(mgmtClient.List(ctx, controlPlaneMachineList, client.InNamespace(clusterNamespace), client.MatchingLabels{
clusterv1.MachineControlPlaneLabel: "",
clusterv1.ClusterNameLabel: cluster.Name,
})).To(Succeed())
for _, m := range controlPlaneMachineList.Items {
g.Expect(m.ObjectMeta.ManagedFields).To(ContainElement(MatchFields(IgnoreExtras, Fields{
"Manager": Equal(kcpManagerName),
})))
}
}, e2eConfig.GetIntervals(specName, "wait-control-plane")...).Should(Succeed())

Byf("Modifying the control plane label and annotations of Cluster %s", cluster.Name)
topologyControlPlane := cluster.Spec.Topology.ControlPlane
Expect(topologyControlPlane.Metadata.Labels).To(HaveKey(e2eOldLabelName))
Expect(topologyControlPlane.Metadata.Annotations).To(HaveKey(e2eOldAnnotationName))

patchHelper, err := patch.NewHelper(cluster, mgmtClient)
Expect(err).ToNot(HaveOccurred())

// Remove old label, and set an old annotation with new value.
delete(topologyControlPlane.Metadata.Labels, e2eOldLabelName)
topologyControlPlane.Metadata.Annotations[e2eOldAnnotationName] = e2eNewAnnotationValue

Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())

Byf("Waiting for labels and annotations of all controlplane machines to be updated.")
Eventually(func(g Gomega) {
controlPlaneMachineList := &clusterv1.MachineList{}
g.Expect(mgmtClient.List(ctx, controlPlaneMachineList, client.InNamespace(clusterNamespace), client.MatchingLabels{
clusterv1.MachineControlPlaneLabel: "",
clusterv1.ClusterNameLabel: cluster.Name,
})).To(Succeed())
for _, m := range controlPlaneMachineList.Items {
g.Expect(m.ObjectMeta.Labels).NotTo(HaveKey(e2eOldLabelName))
g.Expect(m.ObjectMeta.Annotations).To(HaveKeyWithValue(e2eOldAnnotationName, e2eNewAnnotationValue))
}
}, e2eConfig.GetIntervals(specName, "wait-control-plane")...).Should(Succeed())
},
}
})
})
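
To run only this spec locally, the `GINKGO_FOCUS` pattern from `test/e2e/README.md` above applies; a sketch (adjust the pattern as needed):

```shell
# focus on the clusterctl upgrade spec added by this PR
make GINKGO_FOCUS="When testing clusterctl upgrades" test-e2e
```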
2 changes: 2 additions & 0 deletions test/e2e/common.go
@@ -43,6 +43,8 @@ const (
WorkersMachineTemplateUpgradeTo = "WORKERS_MACHINE_TEMPLATE_UPGRADE_TO"
IPFamily = "IP_FAMILY"
KindImageVersion = "KIND_IMAGE_VERSION"
CapiCoreVersion = "CAPI_CORE_VERSION"
K3sCapiCurrentVersion = "K3S_CAPI_CURRENT_VERSION"
)

func Byf(format string, a ...interface{}) {
71 changes: 39 additions & 32 deletions test/e2e/config/k3s-docker.yaml
@@ -15,9 +15,10 @@ providers:
- name: cluster-api
type: CoreProvider
versions:
- name: v1.7.2
value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/core-components.yaml
- name: v1.8.1
value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.8.1/core-components.yaml
type: url
contract: v1beta1
files:
- sourcePath: "../data/shared/v1beta1/metadata.yaml"
replacements:
@@ -26,25 +27,12 @@ providers:
- name: docker
type: InfrastructureProvider
versions:
# Will use the latest version defined in ../data/shared/v1beta1/metadata.yaml
# to init the management cluster
- name: v1.7.2 # used during e2e-test
value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/infrastructure-components-development.yaml
type: url
files:
- sourcePath: "../data/shared/v1beta1/metadata.yaml"
replacements:
- old: "imagePullPolicy: Always"
new: "imagePullPolicy: IfNotPresent"

# Add v1.8.99 to support tilt (not presented in ../data/shared/v1beta1/metadata.yaml)
# when bootstrapping with tilt, it will use
# the defaultProviderVersion in https://github.com/kubernetes-sigs/cluster-api/blob/main/hack/tools/internal/tilt-prepare/main.go as
# default version for docker infrastructure provider
# name here should match defaultProviderVersion
- name: v1.8.99 # next; use manifest from source files
value: https://github.com/kubernetes-sigs/cluster-api/releases/latest/download/infrastructure-components-development.yaml
- name: v1.8.1 # used during e2e-test
value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.8.1/infrastructure-components-development.yaml
type: url
contract: v1beta1
files:
- sourcePath: "../data/shared/v1beta1/metadata.yaml"
replacements:
Expand All @@ -60,31 +48,50 @@ providers:
- name: k3s
type: BootstrapProvider
versions:
# Could add older release version for upgrading test, but
# by default, will only use the latest version defined in
# ${ProjectRoot}/metadata.yaml to init the management cluster
# An older release is added for the k3s provider upgrade test (clusterctl_upgrade_test);
# all other tests will use the k3s build from source
# to init the management cluster.
- name: "v0.2.0"
value: "https://github.com/k3s-io/cluster-api-k3s/releases/download/v0.2.0/bootstrap-components.yaml"
type: "url"
contract: v1beta1
files:
- sourcePath: "../data/shared/k3s/v0.2/metadata.yaml"
targetName: "metadata.yaml"
# By default, only the latest version defined in
# ${ProjectRoot}/metadata.yaml (this one) is used to init the management cluster;
# this version should be updated when ${ProjectRoot}/metadata.yaml
# is modified
- name: v0.2.99 # next; use manifest from source files
value: "../../../bootstrap/config/default"
files:
- sourcePath: "../../../metadata.yaml"
targetName: "metadata.yaml"
- name: k3s
type: ControlPlaneProvider
versions:
- name: "v0.2.0"
value: "https://github.com/k3s-io/cluster-api-k3s/releases/download/v0.2.0/control-plane-components.yaml"
type: "url"
contract: v1beta1
files:
- sourcePath: "../data/shared/k3s/v0.2/metadata.yaml"
targetName: "metadata.yaml"
- name: v0.2.99 # next; use manifest from source files
value: "../../../controlplane/config/default"
files:
- sourcePath: "../../../metadata.yaml"
targetName: "metadata.yaml"

variables:
KUBERNETES_VERSION_MANAGEMENT: "v1.28.0"
KUBERNETES_VERSION: "v1.28.6+k3s2"
KUBERNETES_VERSION_UPGRADE_TO: "v1.28.7+k3s1"
KUBERNETES_VERSION_MANAGEMENT: "v1.30.0"
KUBERNETES_VERSION: "v1.30.2+k3s2"
KUBERNETES_VERSION_UPGRADE_TO: "v1.30.3+k3s1"
IP_FAMILY: "IPv4"
KIND_IMAGE_VERSION: "v1.28.0"
KIND_IMAGE_VERSION: "v1.30.0"
# Used during clusterctl upgrade test
CAPI_CORE_VERSION: "1.8.1"
K3S_CAPI_CURRENT_VERSION: "0.2.99"
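# Note: these two variables are read via the CapiCoreVersion / K3sCapiCurrentVersion
# constants added in test/e2e/common.go; K3S_CAPI_CURRENT_VERSION refers to the
# v0.2.99 build-from-source provider entries defined above.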
# Enabling the feature flags by setting the env variables.
CLUSTER_TOPOLOGY: "true"
EXP_MACHINE_POOL: "true"
@@ -16,6 +16,14 @@ spec:
class: k3s
version: ${KUBERNETES_VERSION}
controlPlane:
metadata:
# These labels and annotations are used by clusterctl_upgrade_test to verify
# that metadata added before SSA was supported can be modified
# or deleted.
labels:
Cluster.topology.controlPlane.oldLabel: "Cluster.topology.controlPlane.oldLabelValue"
annotations:
Cluster.topology.controlPlane.oldAnnotation: "Cluster.topology.controlPlane.oldAnnotationValue"
nodeDeletionTimeout: "30s"
nodeVolumeDetachTimeout: "5m"
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
9 changes: 9 additions & 0 deletions test/e2e/data/shared/k3s/v0.2/metadata.yaml
@@ -0,0 +1,9 @@
apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
kind: Metadata
releaseSeries:
- major: 0
minor: 1
contract: v1beta1
- major: 0
minor: 2
contract: v1beta1
2 changes: 1 addition & 1 deletion test/e2e/data/shared/v1beta1/metadata.yaml
@@ -5,5 +5,5 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
kind: Metadata
releaseSeries:
- major: 1
minor: 7
minor: 8
contract: v1beta1
6 changes: 3 additions & 3 deletions test/e2e/inplace_rollout_test.go
@@ -49,7 +49,7 @@ import (
// setting on the ControlPlane object can be rolled out to the underlying machines.
// The original test does not apply to k3s clusters as it modifies controlPlane fields specific to KubeadmControlPlane.
// Link to CAPI clusterclass_rollout test: https://github.com/kubernetes-sigs/cluster-api/blob/main/test/e2e/clusterclass_rollout.go
var _ = Describe("Inplace mutable fields rollout test", func() {
var _ = Describe("Inplace mutable fields rollout test [ClusterClass]", func() {
var (
ctx = context.TODO()
specName = "inplace-rollout"
@@ -151,7 +151,7 @@ type modifyControlPlaneViaClusterAndWaitInput struct {
}

// modifyControlPlaneViaClusterAndWait modifies the ControlPlaneTopology of a Cluster topology via ModifyControlPlaneTopology.
// It then waits until the changes are rolled out to the ControlPlane of the Cluster.
// It then waits until the changes are rolled out to the ControlPlane and ControlPlane Machine of the Cluster.
func modifyControlPlaneViaClusterAndWait(ctx context.Context, input modifyControlPlaneViaClusterAndWaitInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for modifyControlPlaneViaClusterAndWait")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling modifyControlPlaneViaClusterAndWait")
@@ -167,7 +167,7 @@ func modifyControlPlaneViaClusterAndWait(ctx context.Context, input modifyControlPlaneViaClusterAndWaitInput) {
input.ModifyControlPlaneTopology(&input.Cluster.Spec.Topology.ControlPlane)
Expect(patchHelper.Patch(ctx, input.Cluster)).To(Succeed())

// NOTE: We only wait until the change is rolled out to the control plane object and not to the control plane machines.
// NOTE: We wait until the change is rolled out to the control plane object and the control plane machines.
Byf("Waiting for control plane rollout to complete.")
Eventually(func(g Gomega) {
// Get the ControlPlane.