Skip to content

Commit

Permalink
Introduce flags for speeding up test execution with "kind" (#858)
Browse files Browse the repository at this point in the history
* Lower initial delay seconds if we're using the dummy container

* Remove unused ca-cert volume from cluster pods

* Remove unused install_go function

* Introduce USE_CERTMANAGER and PRESERVE_KIND_CLUSTER to kind test execution and skip downloading binaries if already present

USE_CERTMANAGER (default="true") can be set to "false" to disable
cert-manager installation to speed up test execution.

PRESERVE_KIND_CLUSTER (default="false") can be set to "true" to keep the
kind cluster around after test execution so that consecutive runs reuse
the kind cluster and the existing helm installs.
This also fixes a bunch of test cases where tests didn't properly clean
up, causing conflicts on consecutive test runs.

* Add fake license string to allow running tests with envtest or dummy image without a valid license
  • Loading branch information
SaaldjorMike authored Sep 25, 2024
1 parent 3167d2e commit e7f6683
Show file tree
Hide file tree
Showing 15 changed files with 294 additions and 128 deletions.
2 changes: 0 additions & 2 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,6 @@ jobs:
- shell: bash
run: |
make test
env:
HUMIO_E2E_LICENSE: ${{ secrets.HUMIO_E2E_LICENSE }}
- name: Publish Test Report
uses: mikepenz/action-junit-report@v4
if: always() # always run even if the previous step fails
Expand Down
1 change: 0 additions & 1 deletion .github/workflows/e2e-dummy.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@ jobs:
- name: run e2e tests
env:
BIN_DIR: ${{ steps.bin_dir.outputs.BIN_DIR }}
HUMIO_E2E_LICENSE: ${{ secrets.HUMIO_E2E_LICENSE }}
E2E_KIND_K8S_VERSION: ${{ matrix.kind-k8s-version }}
E2E_LOGS_HUMIO_HOSTNAME: ${{ secrets.E2E_LOGS_HUMIO_HOSTNAME }}
E2E_LOGS_HUMIO_INGEST_TOKEN: ${{ secrets.E2E_LOGS_HUMIO_INGEST_TOKEN }}
Expand Down
3 changes: 0 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -49,9 +49,6 @@ vet: ## Run go vet against code.
go vet ./...

test: manifests generate fmt vet ginkgo ## Run tests.
ifndef HUMIO_E2E_LICENSE
$(error HUMIO_E2E_LICENSE not set)
endif
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
$(SHELL) -c "\
eval \$$($(GOBIN)/setup-envtest use -p env ${TEST_K8S_VERSION}); \
Expand Down
6 changes: 5 additions & 1 deletion controllers/humiocluster_defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -561,7 +561,7 @@ func (hnp *HumioNodePool) GetContainerReadinessProbe() *corev1.Probe {
}

if hnp.humioNodeSpec.ContainerReadinessProbe == nil {
return &corev1.Probe{
probe := &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/is-node-up",
Expand All @@ -575,6 +575,10 @@ func (hnp *HumioNodePool) GetContainerReadinessProbe() *corev1.Probe {
SuccessThreshold: 1,
FailureThreshold: 10,
}
if os.Getenv("DUMMY_LOGSCALE_IMAGE") == "true" {
probe.InitialDelaySeconds = 0
}
return probe
}
return hnp.humioNodeSpec.ContainerReadinessProbe
}
Expand Down
16 changes: 0 additions & 16 deletions controllers/humiocluster_pods.go
Original file line number Diff line number Diff line change
Expand Up @@ -452,22 +452,6 @@ func ConstructPod(hnp *HumioNodePool, humioNodeName string, attachments *podAtta
},
},
})
pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
Name: "ca-cert",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: hnp.GetClusterName(),
DefaultMode: &mode,
Items: []corev1.KeyToPath{
{
Key: "ca.crt",
Path: "certs/ca-bundle.crt",
Mode: &mode,
},
},
},
},
})
}

if attachments.bootstrapTokenSecretReference.hash != "" {
Expand Down
120 changes: 69 additions & 51 deletions controllers/suite/clusters/humiocluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -278,7 +278,7 @@ var _ = Describe("HumioCluster Controller", func() {
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -462,7 +462,7 @@ var _ = Describe("HumioCluster Controller", func() {
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -556,7 +556,7 @@ var _ = Describe("HumioCluster Controller", func() {
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -631,7 +631,7 @@ var _ = Describe("HumioCluster Controller", func() {
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -707,16 +707,16 @@ var _ = Describe("HumioCluster Controller", func() {
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})

Context("Humio Cluster Update EXTERNAL_URL", Label("envtest", "dummy", "real"), func() {
Context("Humio Cluster Update EXTERNAL_URL", Label("dummy", "real"), func() {
It("Update should correctly replace pods to use the new EXTERNAL_URL in a non-rolling fashion", func() {
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.UseCertManager() {
key := types.NamespacedName{
Name: "humiocluster-update-ext-url",
Namespace: testProcessNamespace,
Expand Down Expand Up @@ -957,7 +957,7 @@ var _ = Describe("HumioCluster Controller", func() {
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -1063,7 +1063,7 @@ var _ = Describe("HumioCluster Controller", func() {
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -1177,7 +1177,7 @@ var _ = Describe("HumioCluster Controller", func() {
Expect(pod.Annotations[controllers.PodRevisionAnnotation]).To(Equal("3"))
}

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -1242,7 +1242,7 @@ var _ = Describe("HumioCluster Controller", func() {

updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -1314,7 +1314,7 @@ var _ = Describe("HumioCluster Controller", func() {

updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -1444,7 +1444,7 @@ var _ = Describe("HumioCluster Controller", func() {
}, testTimeout, suite.TestInterval).Should(BeTrue())

updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -1666,7 +1666,7 @@ var _ = Describe("HumioCluster Controller", func() {
}, testTimeout, suite.TestInterval).Should(BeTrue())

updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels())
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -1767,7 +1767,7 @@ var _ = Describe("HumioCluster Controller", func() {
}, testTimeout, suite.TestInterval).Should(BeTrue())

updatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels())
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(&updatedHumioCluster) {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
Expand Down Expand Up @@ -3337,20 +3337,24 @@ var _ = Describe("HumioCluster Controller", func() {
defer suite.CleanupCluster(ctx, k8sClient, toCreate)

initialExpectedVolumesCount := 5
initialExpectedVolumeMountsCount := 4
initialExpectedHumioContainerVolumeMountsCount := 4

if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
// if we run on a real cluster we have TLS enabled (using 2 volumes),
// and k8s will automatically inject a service account token adding one more
initialExpectedVolumesCount += 3
initialExpectedVolumeMountsCount += 2
// k8s will automatically inject a service account token
initialExpectedVolumesCount += 1 // kube-api-access-<ID>
initialExpectedHumioContainerVolumeMountsCount += 1 // kube-api-access-<ID>

if helpers.UseCertManager() {
initialExpectedVolumesCount += 1 // tls-cert
initialExpectedHumioContainerVolumeMountsCount += 1 // tls-cert
}
}

clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
Expect(pod.Spec.Volumes).To(HaveLen(initialExpectedVolumesCount))
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].VolumeMounts).To(HaveLen(initialExpectedVolumeMountsCount))
Expect(pod.Spec.Containers[humioIdx].VolumeMounts).To(HaveLen(initialExpectedHumioContainerVolumeMountsCount))
}

suite.UsingClusterBy(key.Name, "Adding additional volumes")
Expand Down Expand Up @@ -3395,7 +3399,7 @@ var _ = Describe("HumioCluster Controller", func() {
return pod.Spec.Containers[humioIdx].VolumeMounts
}
return []corev1.VolumeMount{}
}, testTimeout, suite.TestInterval).Should(HaveLen(initialExpectedVolumeMountsCount + 1))
}, testTimeout, suite.TestInterval).Should(HaveLen(initialExpectedHumioContainerVolumeMountsCount + 1))
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
Expect(pod.Spec.Volumes).Should(ContainElement(extraVolume))
Expand All @@ -3416,7 +3420,7 @@ var _ = Describe("HumioCluster Controller", func() {
Type: humiov1alpha1.HumioClusterUpdateStrategyRollingUpdate,
}
protocol := "http"
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
if helpers.TLSEnabled(toCreate) {
protocol = "https"
}

Expand Down Expand Up @@ -3815,39 +3819,40 @@ var _ = Describe("HumioCluster Controller", func() {
})
})

Context("Humio Cluster with additional hostnames for TLS", Label("envtest", "dummy", "real"), func() {
Context("Humio Cluster with additional hostnames for TLS", Label("dummy", "real"), func() {
It("Creating cluster with additional hostnames for TLS", func() {
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
key := types.NamespacedName{
Name: "humiocluster-tls-additional-hostnames",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.TLS = &humiov1alpha1.HumioClusterTLSSpec{
Enabled: helpers.BoolPtr(true),
ExtraHostnames: []string{
"something.additional",
"yet.another.something.additional",
},
}
key := types.NamespacedName{
Name: "humiocluster-tls-additional-hostnames",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
if !helpers.TLSEnabled(toCreate) {
return
}
toCreate.Spec.TLS = &humiov1alpha1.HumioClusterTLSSpec{
Enabled: helpers.BoolPtr(true),
ExtraHostnames: []string{
"something.additional",
"yet.another.something.additional",
},
}

suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, testHumioClient, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, testHumioClient, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)

suite.UsingClusterBy(key.Name, "Confirming certificate objects contain the additional hostnames")
suite.UsingClusterBy(key.Name, "Confirming certificate objects contain the additional hostnames")

Eventually(func() ([]cmapi.Certificate, error) {
return kubernetes.ListCertificates(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
}, testTimeout, suite.TestInterval).Should(HaveLen(2))
Eventually(func() ([]cmapi.Certificate, error) {
return kubernetes.ListCertificates(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
}, testTimeout, suite.TestInterval).Should(HaveLen(2))

var certificates []cmapi.Certificate
certificates, err = kubernetes.ListCertificates(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
Expect(err).To(Succeed())
for _, certificate := range certificates {
Expect(certificate.Spec.DNSNames).Should(ContainElements(toCreate.Spec.TLS.ExtraHostnames))
}
var certificates []cmapi.Certificate
certificates, err = kubernetes.ListCertificates(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
Expect(err).To(Succeed())
for _, certificate := range certificates {
Expect(certificate.Spec.DNSNames).Should(ContainElements(toCreate.Spec.TLS.ExtraHostnames))
}
})
})
Expand Down Expand Up @@ -4362,6 +4367,19 @@ var _ = Describe("HumioCluster Controller", func() {
for _, pod := range clusterPods {
Expect(pod.Spec.PriorityClassName).To(Equal(toCreate.Spec.PriorityClassName))
}

Expect(k8sClient.Delete(context.TODO(), priorityClass)).To(Succeed())

Eventually(func() bool {
return k8serrors.IsNotFound(k8sClient.Get(
context.TODO(),
types.NamespacedName{
Namespace: priorityClass.Namespace,
Name: priorityClass.Name,
},
priorityClass),
)
}, testTimeout, suite.TestInterval).Should(BeTrue())
})
})

Expand Down
4 changes: 2 additions & 2 deletions controllers/suite/clusters/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,8 +84,6 @@ var _ = BeforeSuite(func() {
log = zapr.NewLogger(zapLog)
logf.SetLogger(log)

Expect(os.Getenv("HUMIO_E2E_LICENSE")).NotTo(BeEmpty())

By("bootstrapping test environment")
useExistingCluster := true
testProcessNamespace = fmt.Sprintf("e2e-clusters-%d", GinkgoParallelProcess())
Expand All @@ -98,6 +96,8 @@ var _ = BeforeSuite(func() {
testHumioClient = humio.NewMockClient()
} else {
testHumioClient = humio.NewClient(log, "")
By("Verifying we have a valid license, as tests will require starting up real LogScale containers")
Expect(os.Getenv("HUMIO_E2E_LICENSE")).NotTo(BeEmpty())
}
} else {
testTimeout = time.Second * 30
Expand Down
15 changes: 11 additions & 4 deletions controllers/suite/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,7 @@ func ConstructBasicNodeSpecForHumioCluster(key types.NamespacedName) humiov1alph
}
}

if useDockerCredentials() {
if UseDockerCredentials() {
nodeSpec.ImagePullSecrets = []corev1.LocalObjectReference{
{Name: DockerRegistryCredentialsSecretName},
}
Expand Down Expand Up @@ -311,12 +311,19 @@ func ConstructBasicSingleNodeHumioCluster(key types.NamespacedName, useAutoCreat
func CreateLicenseSecret(ctx context.Context, clusterKey types.NamespacedName, k8sClient client.Client, cluster *humiov1alpha1.HumioCluster) {
UsingClusterBy(cluster.Name, fmt.Sprintf("Creating the license secret %s", cluster.Spec.License.SecretKeyRef.Name))

licenseString := "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzUxMiJ9.eyJpc09lbSI6ZmFsc2UsImF1ZCI6Ikh1bWlvLWxpY2Vuc2UtY2hlY2siLCJzdWIiOiJIdW1pbyBFMkUgdGVzdHMiLCJ1aWQiOiJGUXNvWlM3Yk1PUldrbEtGIiwibWF4VXNlcnMiOjEwLCJhbGxvd1NBQVMiOnRydWUsIm1heENvcmVzIjoxLCJ2YWxpZFVudGlsIjoxNzQzMTY2ODAwLCJleHAiOjE3NzQ1OTMyOTcsImlzVHJpYWwiOmZhbHNlLCJpYXQiOjE2Nzk5ODUyOTcsIm1heEluZ2VzdEdiUGVyRGF5IjoxfQ.someinvalidsignature"

// If we use a k8s that is not envtest, and we didn't specify we are using a dummy image, we require a valid license
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" && os.Getenv("DUMMY_LOGSCALE_IMAGE") != "true" {
licenseString = os.Getenv("HUMIO_E2E_LICENSE")
}

licenseSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-license", clusterKey.Name),
Namespace: clusterKey.Namespace,
},
StringData: map[string]string{"license": os.Getenv("HUMIO_E2E_LICENSE")},
StringData: map[string]string{"license": licenseString},
Type: corev1.SecretTypeOpaque,
}
Expect(k8sClient.Create(ctx, &licenseSecret)).To(Succeed())
Expand Down Expand Up @@ -663,13 +670,13 @@ func WaitForReconcileToSync(ctx context.Context, key types.NamespacedName, k8sCl
}, testTimeout, TestInterval).Should(BeNumerically("==", beforeGeneration))
}

func useDockerCredentials() bool {
func UseDockerCredentials() bool {
return os.Getenv(dockerUsernameEnvVar) != "" && os.Getenv(dockerPasswordEnvVar) != "" &&
os.Getenv(dockerUsernameEnvVar) != "none" && os.Getenv(dockerPasswordEnvVar) != "none"
}

func CreateDockerRegredSecret(ctx context.Context, namespace corev1.Namespace, k8sClient client.Client) {
if !useDockerCredentials() {
if !UseDockerCredentials() {
return
}

Expand Down
Loading

0 comments on commit e7f6683

Please sign in to comment.