From 474570671be8781fe3e4f688222fc683ab43bbb8 Mon Sep 17 00:00:00 2001
From: Yang Chiu
Date: Tue, 22 Oct 2024 14:08:34 +0800
Subject: [PATCH] ci: support sles for hal cluster

Signed-off-by: Yang Chiu
---
 pipelines/e2e/Jenkinsfile                     |  15 +-
 .../terraform/harvester/sles/main.tf          | 208 ++++++++++++++++++
 .../terraform/harvester/sles/variables.tf     |  54 +++++
 3 files changed, 276 insertions(+), 1 deletion(-)
 create mode 100644 test_framework/terraform/harvester/sles/main.tf
 create mode 100644 test_framework/terraform/harvester/sles/variables.tf

diff --git a/pipelines/e2e/Jenkinsfile b/pipelines/e2e/Jenkinsfile
index 7d2dce8568..6f5672b386 100644
--- a/pipelines/e2e/Jenkinsfile
+++ b/pipelines/e2e/Jenkinsfile
@@ -7,7 +7,20 @@ def BUILD_TRIGGER_BY = "\n${currentBuild.getBuildCauses()[0].shortDescription}"
 def SELINUX_MODE = params.SELINUX_MODE ? params.SELINUX_MODE : ""
 
 def CREDS_ID = JOB_BASE_NAME == "longhorn-tests-regression" ? "AWS_CREDS_RANCHER_QA" : "AWS_CREDS"
-def REGISTRATION_CODE_ID = params.ARCH == "amd64" ? "REGISTRATION_CODE" : "REGISTRATION_CODE_ARM64"
+def REGISTRATION_CODE_ID = "REGISTRATION_CODE"
+if (params.DISTRO == "sles") {
+    if (params.ARCH == "amd64") {
+        REGISTRATION_CODE_ID = "SLES_REGISTRATION_CODE"
+    } else {
+        REGISTRATION_CODE_ID = "SLES_REGISTRATION_CODE_ARM64"
+    }
+} else if (params.DISTRO == "sle-micro") {
+    if (params.ARCH == "amd64") {
+        REGISTRATION_CODE_ID = "REGISTRATION_CODE"
+    } else {
+        REGISTRATION_CODE_ID = "REGISTRATION_CODE_ARM64"
+    }
+}
 
 // parameters for air gap installation
 def AIR_GAP_INSTALLATION = params.AIR_GAP_INSTALLATION ? params.AIR_GAP_INSTALLATION : false
diff --git a/test_framework/terraform/harvester/sles/main.tf b/test_framework/terraform/harvester/sles/main.tf
new file mode 100644
index 0000000000..3774868c19
--- /dev/null
+++ b/test_framework/terraform/harvester/sles/main.tf
@@ -0,0 +1,208 @@
+terraform {
+  required_providers {
+    rancher2 = {
+      source = "rancher/rancher2"
+      version = "~> 5.1.0"
+    }
+  }
+}
+
+provider "rancher2" {
+  api_url = var.lab_url
+  insecure = true
+  access_key = var.lab_access_key
+  secret_key = var.lab_secret_key
+}
+
+resource "random_string" "random_suffix" {
+  length = 8
+  special = false
+  lower = true
+  upper = false
+}
+
+data "rancher2_cluster_v2" "hal-cluster" {
+  name = "hal"
+}
+
+resource "rancher2_cloud_credential" "e2e-credential" {
+  name = "e2e-credential-${random_string.random_suffix.id}"
+  harvester_credential_config {
+    cluster_id = data.rancher2_cluster_v2.hal-cluster.cluster_v1_id
+    cluster_type = "imported"
+    kubeconfig_content = data.rancher2_cluster_v2.hal-cluster.kube_config
+  }
+}
+
+resource "rancher2_machine_config_v2" "e2e-machine-config-controlplane" {
+
+  generate_name = "e2e-machine-config-controlplane-${random_string.random_suffix.id}"
+
+  harvester_config {
+
+    vm_namespace = "longhorn-qa"
+
+    cpu_count = "4"
+    memory_size = "8"
+
+    disk_info = <-
+      ${file(var.ssh_public_key_file_path)}
+runcmd:
+  - SUSEConnect -r ${var.registration_code}
+  - zypper install -y qemu-guest-agent iptables
+  - - systemctl
+    - enable
+    - '--now'
+    - qemu-guest-agent.service
+EOF
+  }
+}
+
+resource "rancher2_machine_config_v2" "e2e-machine-config-worker" {
+
+  generate_name = "e2e-machine-config-worker-${random_string.random_suffix.id}"
+
+  harvester_config {
+
+    vm_namespace = "longhorn-qa"
+
+    cpu_count = "4"
+    memory_size = "8"
+
+    disk_info = <-
+      ${file(var.ssh_public_key_file_path)}
+runcmd:
+  - SUSEConnect -r ${var.registration_code}
+  - zypper install -y qemu-guest-agent iptables open-iscsi nfs-client cryptsetup device-mapper
+  - zypper -n install --force-resolution kernel-default
+  - - systemctl
+    - enable
+    - '--now'
+    - qemu-guest-agent.service
+  - systemctl enable iscsid
+  - systemctl start iscsid
+  - touch /etc/modules-load.d/modules.conf
+  - echo uio >> /etc/modules-load.d/modules.conf
+  - echo uio_pci_generic >> /etc/modules-load.d/modules.conf
+  - echo vfio_pci >> /etc/modules-load.d/modules.conf
+  - echo nvme-tcp >> /etc/modules-load.d/modules.conf
+  - echo dm_crypt >> /etc/modules-load.d/modules.conf
+  - echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+  - echo "vm.nr_hugepages=1024" >> /etc/sysctl.conf
+  - shutdown -r +5
+EOF
+  }
+}
+
+resource "rancher2_cluster_v2" "e2e-cluster" {
+
+  name = "e2e-cluster-${random_string.random_suffix.id}"
+
+  kubernetes_version = var.k8s_distro_version
+
+  rke_config {
+    machine_pools {
+      name = "control-plane-pool"
+      cloud_credential_secret_name = rancher2_cloud_credential.e2e-credential.id
+      control_plane_role = true
+      etcd_role = true
+      worker_role = false
+      quantity = 1
+      machine_config {
+        kind = rancher2_machine_config_v2.e2e-machine-config-controlplane.kind
+        name = rancher2_machine_config_v2.e2e-machine-config-controlplane.name
+      }
+    }
+    machine_pools {
+      name = "worker-pool"
+      cloud_credential_secret_name = rancher2_cloud_credential.e2e-credential.id
+      control_plane_role = false
+      etcd_role = false
+      worker_role = true
+      quantity = 3
+      machine_config {
+        kind = rancher2_machine_config_v2.e2e-machine-config-worker.kind
+        name = rancher2_machine_config_v2.e2e-machine-config-worker.name
+      }
+    }
+    machine_selector_config {
+      config = <