From 0ce64092418e243b3df16bd0510bdfe126715502 Mon Sep 17 00:00:00 2001 From: Google SAP Deployments Dev Date: Wed, 28 Feb 2024 19:05:21 +0000 Subject: [PATCH 1/8] Project import generated by Copybara. GitOrigin-RevId: 8fd743e752d4ac61cd2ce6c8f48103e39412c880 --- modules/sap_hana/main.tf | 354 +++++++++-- modules/sap_hana/outputs.tf | 1 - modules/sap_hana/variables.tf | 294 ++++++++- modules/sap_hana/versions.tf | 12 +- modules/sap_hana_ha/main.tf | 816 +++++++++++++++++++++++-- modules/sap_hana_ha/outputs.tf | 9 +- modules/sap_hana_ha/variables.tf | 278 ++++++++- modules/sap_hana_ha/versions.tf | 16 +- modules/sap_hana_scaleout/main.tf | 216 +++++-- modules/sap_hana_scaleout/outputs.tf | 1 - modules/sap_hana_scaleout/variables.tf | 163 ++++- modules/sap_hana_scaleout/versions.tf | 10 +- modules/sap_nw/main.tf | 185 ++++++ modules/sap_nw/outputs.tf | 19 + modules/sap_nw/variables.tf | 147 +++++ modules/sap_nw/versions.tf | 21 + modules/sap_nw_ha/main.tf | 425 +++++++++++++ modules/sap_nw_ha/outputs.tf | 47 ++ modules/sap_nw_ha/variables.tf | 352 +++++++++++ modules/sap_nw_ha/versions.tf | 21 + 20 files changed, 3212 insertions(+), 175 deletions(-) create mode 100644 modules/sap_nw/main.tf create mode 100644 modules/sap_nw/outputs.tf create mode 100644 modules/sap_nw/variables.tf create mode 100644 modules/sap_nw/versions.tf create mode 100644 modules/sap_nw_ha/main.tf create mode 100644 modules/sap_nw_ha/outputs.tf create mode 100644 modules/sap_nw_ha/variables.tf create mode 100644 modules/sap_nw_ha/versions.tf diff --git a/modules/sap_hana/main.tf b/modules/sap_hana/main.tf index 5cb90467..a68dbadc 100644 --- a/modules/sap_hana/main.tf +++ b/modules/sap_hana/main.tf @@ -13,12 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - # # Terraform SAP HANA for Google Cloud # -# Version: BUILD.VERSION -# Build Hash: BUILD.HASH +# Version: 2.0.202402230649 +# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 # ################################################################################ @@ -31,7 +30,7 @@ locals { "n1-highmem-96" = 624 "n1-megamem-96" = 1433 "n2-highmem-32" = 256 - "n2-highmem-48" = 386 + "n2-highmem-48" = 384 "n2-highmem-64" = 512 "n2-highmem-80" = 640 "n2-highmem-96" = 768 @@ -43,18 +42,22 @@ locals { "m1-ultramem-40" = 961 "m1-ultramem-80" = 1922 "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5916 - "m2-megamem-416" = 5916 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11832 + "m2-ultramem-416" = 11744 "m3-megamem-64" = 976 "m3-megamem-128" = 1952 "m3-ultramem-32" = 976 "m3-ultramem-64" = 1952 "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 } + cpu_platform_map = { - "n1-standard-16" = "Intel Haswell" "n1-highmem-32" = "Intel Broadwell" "n1-highmem-64" = "Intel Broadwell" "n1-highmem-96" = "Intel Skylake" @@ -81,24 +84,136 @@ locals { "m3-ultramem-32" = "Automatic" "m3-ultramem-64" = "Automatic" "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + } + + + # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. + # All 'over provisioned' capacity is to go onto the data disk. 
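# A rough worked example of how these minimums combine with the memory-based
# formulas defined below (illustrative only; assumes machine_type "n2-highmem-64",
# i.e. 512 GB RAM, and the default disk_type "pd-ssd"):
#   hana_log_size        = ceil(min(512, max(64, 512 / 2)))         = 256 GB
#   hana_data_size_min   = ceil(512 * 12 / 10)                      = 615 GB
#   hana_shared_size_min = min(1024, 512)                           = 512 GB
#   hana_usrsap_size     = 32 GB
#   hana_data_size       = max(615, 550 - 32 - 256 - 512)           = 615 GB
#   pd_size (unified)    = ceil(max(550, 256 + 615 + 512 + 32 + 1)) = 1416 GB
#   backup_size (no scaleout nodes) = 2 * 512                       = 1024 GB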
+ min_total_disk_map = { + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 + "hyperdisk-balanced" = 0 + "hyperdisk-extreme" = 0 } + min_total_disk = local.min_total_disk_map[var.disk_type] + mem_size = lookup(local.mem_size_map, var.machine_type, 320) - hana_log_size_min = min(512, max(64, local.mem_size / 2)) - hana_data_size_min = local.mem_size * 12 / 10 + hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) + hana_data_size_min = ceil(local.mem_size * 12 / 10) hana_shared_size_min = min(1024, local.mem_size) + hana_usrsap_size = 32 + + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_usrsap_size - local.hana_log_size - local.hana_shared_size_min ) + + # scaleout_nodes > 0 then hana_shared_size and backup is changed; assumes that sap_hana_scaleout_nodes is an integer + hana_shared_size = var.sap_hana_scaleout_nodes > 0 ? local.hana_shared_size_min * ceil(var.sap_hana_scaleout_nodes / 4): local.hana_shared_size_min + backup_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size * (var.sap_hana_scaleout_nodes + 1) + + # ensure the combined disk meets minimum size/performance ; + pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_shared_size + local.hana_usrsap_size + 1)) + + # ensure pd-hdd for backup is smaller than the maximum pd size + pd_size_worker = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_usrsap_size + 1)) - hana_log_size = local.hana_log_size_min - hana_data_size = local.hana_data_size_min + final_data_disk_type = var.data_disk_type_override == "" ? var.disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? var.disk_type : var.log_disk_type_override - # scaleout_nodes > 0 then hana_shared_size and pdhdd is changed; assumes that sap_hana_scaleout_nodes is an interger - hana_shared_size = local.hana_shared_size_min * (var.sap_hana_scaleout_nodes > 0 ? ceil(var.sap_hana_scaleout_nodes / 4) : 1) - pdhdd_size_default = var.sap_hana_scaleout_nodes > 0 ? 2 * local.mem_size * (var.sap_hana_scaleout_nodes + 1) : 500 + temp_shared_disk_type = contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], var.disk_type) ? "pd-balanced" : var.disk_type + temp_usrsap_disk_type = contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], var.disk_type) ? "pd-balanced" : var.disk_type - # ensure pd-ssd meets minimum size/performance ; 32 is the min allowed memery and + 1 is there to make sure no undersizing happens - pdssd_size = ceil(max(834, local.hana_log_size + local.hana_data_size + local.hana_shared_size + 32 + 1)) + final_shared_disk_type = var.shared_disk_type_override == "" ? local.temp_shared_disk_type : var.shared_disk_type_override + final_usrsap_disk_type = var.usrsap_disk_type_override == "" ? local.temp_usrsap_disk_type : var.usrsap_disk_type_override + + unified_pd_size = var.unified_disk_size_override == null ? local.pd_size : var.unified_disk_size_override + unified_worker_pd_size = var.unified_worker_disk_size_override == null ? local.pd_size_worker : var.unified_worker_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override + shared_pd_size = var.shared_disk_size_override == null ? 
local.hana_shared_size : var.shared_disk_size_override + usrsap_pd_size = var.usrsap_disk_size_override == null ? local.hana_usrsap_size : var.usrsap_disk_size_override + + # IOPS + hdx_iops_map = { + "data" = max(10000, local.data_pd_size*2) + "log" = max(10000, local.log_pd_size*2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) + "worker" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) + "backup" = max(10000, 2 * local.backup_size) + } + hdb_iops_map = { + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default + "backup" = var.hyperdisk_balanced_iops_default + } + null_iops_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + "backup" = null + } + iops_map = { + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map + "hyperdisk-balanced" = local.hdb_iops_map + "hyperdisk-extreme" = local.hdx_iops_map + } + + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_shared_iops = var.shared_disk_iops_override == null ? local.iops_map[local.final_shared_disk_type]["shared"] : var.shared_disk_iops_override + final_usrsap_iops = var.usrsap_disk_iops_override == null ? local.iops_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[var.disk_type]["unified"] : var.unified_disk_iops_override + final_unified_worker_iops = var.unified_worker_disk_iops_override == null ? local.iops_map[var.disk_type]["worker"] : var.unified_worker_disk_iops_override + final_backup_iops = var.backup_disk_iops_override == null ? local.iops_map[var.backup_disk_type]["backup"] : var.backup_disk_iops_override + + # THROUGHPUT + hdb_throughput_map = { + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_throughput_default + "worker" = var.hyperdisk_balanced_throughput_default + "backup" = var.hyperdisk_balanced_throughput_default + } + null_throughput_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + "backup" = null + } + throughput_map = { + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map + "hyperdisk-balanced" = local.hdb_throughput_map + "hyperdisk-extreme" = local.null_throughput_map + } - # change PD-HDD size if a custom backup size has been set - pdhdd_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : local.pdhdd_size_default + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_shared_throughput = var.shared_disk_throughput_override == null ? 
local.throughput_map[local.final_shared_disk_type]["shared"] : var.shared_disk_throughput_override + final_usrsap_throughput = var.usrsap_disk_throughput_override == null ? local.throughput_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[var.disk_type]["unified"] : var.unified_disk_throughput_override + final_unified_worker_throughput = var.unified_worker_disk_throughput_override == null ? local.throughput_map[var.disk_type]["worker"] : var.unified_worker_disk_throughput_override + final_backup_throughput = var.backup_disk_throughput_override == null ? local.throughput_map[var.backup_disk_type]["backup"] : var.backup_disk_throughput_override # network config variables zone_split = split("-", var.zone) @@ -109,6 +224,31 @@ locals { "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}") primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url + + has_shared_nfs = !( var.sap_hana_shared_nfs == "" && var.sap_hana_shared_nfs_resource == null) + make_shared_disk = !var.use_single_shared_data_log_disk && !local.has_shared_nfs + + use_backup_disk = (var.include_backup_disk && var.sap_hana_backup_nfs == "" && var.sap_hana_backup_nfs_resource == null) + + both_backup_nfs_defined = (var.sap_hana_backup_nfs != "") && var.sap_hana_backup_nfs_resource != null + both_shared_nfs_defined = (var.sap_hana_shared_nfs != "") && var.sap_hana_shared_nfs_resource != null + + backup_nfs_endpoint = var.sap_hana_backup_nfs_resource == null ? var.sap_hana_backup_nfs : "${var.sap_hana_backup_nfs_resource.networks[0].ip_addresses[0]}:/${var.sap_hana_backup_nfs_resource.file_shares[0].name}" + shared_nfs_endpoint = var.sap_hana_shared_nfs_resource == null ? var.sap_hana_shared_nfs : "${var.sap_hana_shared_nfs_resource.networks[0].ip_addresses[0]}:/${var.sap_hana_shared_nfs_resource.file_shares[0].name}" + +} + +data "assert_test" "one_backup" { + test = local.use_backup_disk || !local.both_backup_nfs_defined + throw = "Either use a disk for /backup (include_backup_disk) or use NFS. If using an NFS as /backup then only either sap_hana_backup_nfs or sap_hana_backup_nfs_resource may be defined." +} +data "assert_test" "one_shared" { + test = !local.both_shared_nfs_defined + throw = "If using an NFS as /shared then only either sap_hana_shared_nfs or sap_hana_shared_nfs_resource may be defined." +} +data "assert_test" "both_or_neither_nfs" { + test = (local.backup_nfs_endpoint == "") == (local.shared_nfs_endpoint == "") + throw = "If either NFS is defined, then both /shared and /backup must be defined." 
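# Illustration of the NFS wiring above (endpoint values are hypothetical):
#   - Disk-based backup: leave sap_hana_backup_nfs and sap_hana_backup_nfs_resource
#     unset and keep include_backup_disk = true, so use_backup_disk is true.
#   - NFS-based /shared and /backup: set, for example,
#       sap_hana_shared_nfs = "10.10.0.2:/hana_shared"
#       sap_hana_backup_nfs = "10.10.0.2:/hana_backup"
#     (or pass the *_nfs_resource objects instead, but never both forms for the same mount).
#   - When a *_nfs_resource object is supplied, the endpoint is assembled as
#     "<networks[0].ip_addresses[0]>:/<file_shares[0].name>".
#   - The assert_test checks above enforce that /shared and /backup are either both
#     NFS-backed or both disk-backed.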
} ################################################################################ @@ -118,7 +258,7 @@ locals { resource "google_compute_disk" "sap_hana_boot_disks" { count = var.sap_hana_scaleout_nodes + 1 name = format("${var.instance_name}-boot%05d", count.index + 1) - type = "pd-standard" + type = "pd-balanced" zone = var.zone size = 30 # GB project = var.project_id @@ -132,22 +272,80 @@ resource "google_compute_disk" "sap_hana_boot_disks" { } } -resource "google_compute_disk" "sap_hana_pdssd_disks" { - count = var.sap_hana_scaleout_nodes + 1 - # TODO(b/202736714): check if name is correct - name = format("${var.instance_name}-pdssd%05d", count.index + 1) - type = "pd-ssd" +resource "google_compute_disk" "sap_hana_unified_disks" { + count = var.use_single_shared_data_log_disk ? 1 : 0 + name = format("${var.instance_name}-hana") + type = var.disk_type + zone = var.zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops + provisioned_throughput = local.final_unified_throughput +} +resource "google_compute_disk" "sap_hana_unified_worker_disks" { + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes : 0 + name = format("${var.instance_name}-hana%05d", count.index + 1) + type = var.disk_type + zone = var.zone + size = local.unified_worker_pd_size + project = var.project_id + provisioned_iops = local.final_unified_worker_iops + provisioned_throughput = local.final_unified_throughput +} + +# Split data/log/sap disks +resource "google_compute_disk" "sap_hana_data_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-data%05d", count.index + 1) + type = local.final_data_disk_type + zone = var.zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops + provisioned_throughput = local.final_data_throughput +} + +resource "google_compute_disk" "sap_hana_log_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-log%05d", count.index + 1) + type = local.final_log_disk_type + zone = var.zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops + provisioned_throughput = local.final_log_throughput +} +resource "google_compute_disk" "sap_hana_shared_disk" { + count = local.make_shared_disk ? 1 : 0 + name = format("${var.instance_name}-shared%05d", count.index + 1) + type = local.final_shared_disk_type + zone = var.zone + size = local.shared_pd_size + project = var.project_id + provisioned_iops = local.final_shared_iops + provisioned_throughput = local.final_shared_throughput +} +resource "google_compute_disk" "sap_hana_usrsap_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-usrsap%05d", count.index + 1) + type = local.final_usrsap_disk_type zone = var.zone - size = local.pdssd_size + size = local.usrsap_pd_size project = var.project_id + provisioned_iops = local.final_usrsap_iops + provisioned_throughput = local.final_usrsap_throughput } + + resource "google_compute_disk" "sap_hana_backup_disk" { - # TODO(b/202736714): check if name is correct + count = local.use_backup_disk ? 
1 : 0 name = "${var.instance_name}-backup" - type = "pd-standard" + type = var.backup_disk_type zone = var.zone - size = local.pdhdd_size + size = local.backup_size project = var.project_id + provisioned_iops = local.final_backup_iops + provisioned_throughput = local.final_backup_throughput } ################################################################################ @@ -160,6 +358,7 @@ resource "google_compute_address" "sap_hana_vm_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = var.vm_static_ip } resource "google_compute_address" "sap_hana_worker_ip" { @@ -169,6 +368,7 @@ resource "google_compute_address" "sap_hana_worker_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = length(var.worker_static_ips) > count.index ? var.worker_static_ips[count.index] : "" } ################################################################################ @@ -187,14 +387,49 @@ resource "google_compute_instance" "sap_hana_primary_instance" { source = google_compute_disk.sap_hana_boot_disks[0].self_link } - attached_disk { - device_name = google_compute_disk.sap_hana_pdssd_disks[0].name - source = google_compute_disk.sap_hana_pdssd_disks[0].self_link + + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_unified_disks[0].name + source = google_compute_disk.sap_hana_unified_disks[0].self_link + } } - attached_disk { - device_name = google_compute_disk.sap_hana_backup_disk.name - source = google_compute_disk.sap_hana_backup_disk.self_link + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_data_disks[0].name + source = google_compute_disk.sap_hana_data_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_log_disks[0].name + source = google_compute_disk.sap_hana_log_disks[0].self_link + } + } + dynamic attached_disk { + for_each = length(google_compute_disk.sap_hana_shared_disk) > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_shared_disk[0].name + source = google_compute_disk.sap_hana_shared_disk[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_usrsap_disks[0].name + source = google_compute_disk.sap_hana_usrsap_disks[0].self_link + } + } + dynamic attached_disk { + for_each = length(google_compute_disk.sap_hana_backup_disk) > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_backup_disk[0].name + source = google_compute_disk.sap_hana_backup_disk[0].self_link + } } can_ip_forward = var.can_ip_forward @@ -202,6 +437,7 @@ resource "google_compute_instance" "sap_hana_primary_instance" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_vm_ip.address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -221,6 +457,7 @@ resource "google_compute_instance" "sap_hana_primary_instance" { ] } + dynamic "reservation_affinity" { for_each = length(var.reservation_name) > 1 ? 
[1] : [] content { @@ -241,12 +478,18 @@ resource "google_compute_instance" "sap_hana_primary_instance" { sap_hana_instance_number = var.sap_hana_instance_number sap_hana_sidadm_password = var.sap_hana_sidadm_password sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret - # wording on system_password may be inconsitent with DM sap_hana_system_password = var.sap_hana_system_password sap_hana_system_password_secret = var.sap_hana_system_password_secret sap_hana_sidadm_uid = var.sap_hana_sidadm_uid sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_hana_shared_nfs = local.shared_nfs_endpoint + sap_hana_backup_nfs = local.backup_nfs_endpoint sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = local.use_backup_disk + sap_hana_shared_disk = local.make_shared_disk + sap_hana_data_disk_type = local.final_data_disk_type + enable_fast_restart = var.enable_fast_restart template-type = "TERRAFORM" } @@ -271,9 +514,34 @@ resource "google_compute_instance" "sap_hana_worker_instances" { source = google_compute_disk.sap_hana_boot_disks[count.index + 1].self_link } - attached_disk { - device_name = google_compute_disk.sap_hana_pdssd_disks[count.index + 1].name - source = google_compute_disk.sap_hana_pdssd_disks[count.index + 1].self_link + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_unified_worker_disks[count.index].name + source = google_compute_disk.sap_hana_unified_worker_disks[count.index].self_link + } + } + + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_data_disks[count.index + 1].name + source = google_compute_disk.sap_hana_data_disks[count.index + 1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_log_disks[count.index + 1].name + source = google_compute_disk.sap_hana_log_disks[count.index + 1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_usrsap_disks[count.index + 1].name + source = google_compute_disk.sap_hana_usrsap_disks[count.index + 1].self_link + } } can_ip_forward = var.can_ip_forward @@ -281,6 +549,7 @@ resource "google_compute_instance" "sap_hana_worker_instances" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_worker_ip[count.index].address + nic_type = var.nic_type == "" ? 
null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -320,12 +589,17 @@ resource "google_compute_instance" "sap_hana_worker_instances" { sap_hana_instance_number = var.sap_hana_instance_number sap_hana_sidadm_password = var.sap_hana_sidadm_password sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret - # wording on system_password may be inconsitent with DM sap_hana_system_password = var.sap_hana_system_password sap_hana_system_password_secret = var.sap_hana_system_password_secret sap_hana_sidadm_uid = var.sap_hana_sidadm_uid sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_hana_shared_nfs = local.shared_nfs_endpoint + sap_hana_backup_nfs = local.backup_nfs_endpoint sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = false + sap_hana_shared_disk = false + enable_fast_restart = var.enable_fast_restart template-type = "TERRAFORM" } diff --git a/modules/sap_hana/outputs.tf b/modules/sap_hana/outputs.tf index 7af04fc0..491afae6 100644 --- a/modules/sap_hana/outputs.tf +++ b/modules/sap_hana/outputs.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - output "sap_hana_primary_self_link" { description = "SAP HANA self-link for the primary instance created" value = google_compute_instance.sap_hana_primary_instance.self_link diff --git a/modules/sap_hana/variables.tf b/modules/sap_hana/variables.tf index 7b317ef8..dff8c6ce 100644 --- a/modules/sap_hana/variables.tf +++ b/modules/sap_hana/variables.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - variable "project_id" { type = string description = "Project id where the instances will be created." @@ -55,7 +54,11 @@ variable "instance_name" { variable "sap_hana_deployment_bucket" { type = string - description = "The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." + description = "The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." + validation { + condition = (! (length(regexall( "gs:", var.sap_hana_deployment_bucket)) > 0)) + error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://." + } default = "" } @@ -74,7 +77,7 @@ variable "sap_hana_instance_number" { default = 0 validation { condition = (var.sap_hana_instance_number >= 0) && (var.sap_hana_instance_number < 100) - error_message = "The sap_hana_instance_number must be 2 digits long." + error_message = "The sap_hana_instance_number must be a number between 0 and 99." } } @@ -127,10 +130,64 @@ variable "sap_hana_scaleout_nodes" { error_message = "The sap_hana_scaleout_nodes must be positive or 0." } } +variable "sap_hana_backup_nfs_resource" { + default = null + type = object({ + networks = list( + object({ + ip_addresses = list(string) + }) + ) + file_shares = list( + object({ + name = string + }) + ) + + }) + description = "NFS resource to be used as the backup drive instead of a disk. This and sap_hana_backup_nfs may not both be set." 
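  # A minimal sketch of how this object is typically supplied (resource name is
  # hypothetical); its shape is meant to line up with the networks/file_shares
  # attributes exported by a google_filestore_instance:
  #
  #   sap_hana_backup_nfs_resource = google_filestore_instance.hana_backup
  #
  # The module then flattens it into "<ip_addresses[0]>:/<file_shares[0].name>".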
+} +variable "sap_hana_shared_nfs_resource" { + default = null + type = object({ + networks = list( + object({ + ip_addresses = list(string) + }) + ) + file_shares = list( + object({ + name = string + }) + ) + + }) + description = "NFS resource to be used as the shared drive instead of a disk. This and sap_hana_shared_nfs may not both be set." +} + +variable "sap_hana_shared_nfs" { + type = string + default = "" + validation { + condition = var.sap_hana_shared_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_shared_nfs)) + error_message = "The sap_hana_backup_nfs must be an IP address followed by ':/' then some name." + } + description = "NFS endpoint for /hana/shared storage." +} + +variable "sap_hana_backup_nfs" { + type = string + default = "" + validation { + condition = var.sap_hana_backup_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_backup_nfs)) + error_message = "The sap_hana_backup_nfs must be an IP address followed by ':/' then some name." + } + description = "NFS endpoint for /hanabackup storage." +} variable "sap_hana_backup_size" { type = number - description = "Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory." + description = "Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory. If sap_hana_backup_nfs is set, this setting is ignored." default = 0 validation { condition = var.sap_hana_backup_size >= 0 @@ -199,18 +256,239 @@ variable "post_deployment_script" { default = "" } +variable "nic_type" { + type = string + description = "Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking." + validation { + condition = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type) + error_message = "The nic_type must be either GVNIC or VIRTIO_NET." + } + default = "" +} + +variable "disk_type" { + type = string + description = "Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) + error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "pd-ssd" +} + +variable "use_single_shared_data_log_disk" { + type = bool + description = "Optional - By default three separate disk for data, logs, and shared will be made. If set to true, one disk will be used instead." + default = false +} + +variable "include_backup_disk" { + type = bool + description = "Optional - The default is true. If set creates a disk for backups." + default = true +} + +variable "backup_disk_type" { + type = string + description = "Optional - The default is pd-balanced, only used if a backup disk is needed." 
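  # Example (hypothetical values): an all-Hyperdisk deployment that keeps backups
  # on a cheaper tier:
  #   disk_type        = "hyperdisk-extreme"
  #   backup_disk_type = "pd-balanced"
  # With extreme/hyperdisk types, the /shared and /usr/sap volumes automatically
  # fall back to pd-balanced (see temp_shared_disk_type / temp_usrsap_disk_type
  # in main.tf).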
+ default = "pd-balanced" + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.backup_disk_type) + error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-standard, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } +} + +variable "vm_static_ip" { + type = string + description = "Optional - Defines an internal static IP for the VM." + validation { + condition = var.vm_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.vm_static_ip)) + error_message = "The vm_static_ip must be a valid IP address." + } + default = "" +} + +variable "worker_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the worker nodes." + validation { + condition = alltrue([ + for ip in var.worker_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All worker_static_ips must be valid IP addresses." + } + default = [] +} + +variable "enable_fast_restart" { + type = bool + description = "Optional - The default is true. If set enables HANA Fast Restart." + default = true +} + +variable "hyperdisk_balanced_iops_default" { + type = number + description = "Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." + default = 3000 +} + +variable "hyperdisk_balanced_throughput_default" { + type = number + description = "Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." + default = 750 +} + # -# DO NOT MODIFY unless you know what you are doing +# DO NOT MODIFY unless instructed or aware of the implications of using those settings # + +variable "data_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) + error_message = "The data_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "log_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) + error_message = "The log_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "shared_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the shared disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.shared_disk_type_override) + error_message = "The shared_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." 
+ } + default = "" +} +variable "usrsap_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the /usr/sap disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.usrsap_disk_type_override) + error_message = "The usrsap_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} + +variable "unified_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary's unified disk, that is based off of the machine_type." + default = null +} +variable "unified_worker_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the unified worker disk(s), that is based off of the machine_type." + default = null +} +variable "data_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." + default = null +} +variable "log_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine_type." + default = null +} +variable "shared_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine_type." + default = null +} +variable "usrsap_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine_type." + default = null +} + +variable "unified_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "unified_worker_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." 
+ default = null +} +variable "shared_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "usrsap_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "backup_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "unified_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "unified_worker_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "shared_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "usrsap_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "backup_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." 
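  # Note: when sap_deployment_debug = true, main.tf rewrites this command from
  # "... | bash -s ..." to "... | bash -x -s ..." so the startup script runs with
  # shell tracing, rather than this variable being overridden directly.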
- default = "curl -s BUILD.TERRA_SH_URL/sap_hana/startup.sh | bash -s BUILD.TERRA_SH_URL" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s BUILD.TERRA_SH_URL/sap_hana/startup_secondary.sh | bash -s BUILD.TERRA_SH_URL" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" description = "DO NOT USE" } @@ -218,4 +496,4 @@ variable "can_ip_forward" { type = bool description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed." default = true -} +} \ No newline at end of file diff --git a/modules/sap_hana/versions.tf b/modules/sap_hana/versions.tf index 9baec7e4..27705ac0 100644 --- a/modules/sap_hana/versions.tf +++ b/modules/sap_hana/versions.tf @@ -13,17 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - terraform { required_version = ">=0.12.6" required_providers { - google = { - source = "hashicorp/google" - version = ">= 4.0.0, < 6" + google = {} + assert = { + source = "bwoznicki/assert" + version = "0.0.1" } } - - provider_meta "google" { - module_name = "blueprints/terraform/terraform-google-sap:sap_hana/v1.1.1" - } } diff --git a/modules/sap_hana_ha/main.tf b/modules/sap_hana_ha/main.tf index 18d41cb9..40ac897b 100644 --- a/modules/sap_hana_ha/main.tf +++ b/modules/sap_hana_ha/main.tf @@ -13,12 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - # # Terraform SAP HANA HA for Google Cloud # -# Version: 2.0.2022101419281665775728 -# Build Hash: 5f4ef08feb4fed0e1eabc3bfc4b2d64d99001ae7 +# Version: 2.0.202402230649 +# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 # ################################################################################ @@ -31,7 +30,7 @@ locals { "n1-highmem-96" = 624 "n1-megamem-96" = 1433 "n2-highmem-32" = 256 - "n2-highmem-48" = 386 + "n2-highmem-48" = 384 "n2-highmem-64" = 512 "n2-highmem-80" = 640 "n2-highmem-96" = 768 @@ -43,13 +42,21 @@ locals { "m1-ultramem-40" = 961 "m1-ultramem-80" = 1922 "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5916 - "m2-megamem-416" = 5916 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11832 + "m2-ultramem-416" = 11744 + "m3-megamem-64" = 976 + "m3-megamem-128" = 1952 + "m3-ultramem-32" = 976 + "m3-ultramem-64" = 1952 + "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 } cpu_platform_map = { - "n1-standard-16" = "Intel Haswell" "n1-highmem-32" = "Intel Broadwell" "n1-highmem-64" = "Intel Broadwell" "n1-highmem-96" = "Intel Skylake" @@ -71,24 +78,137 @@ locals { "m2-megamem-416" = "Automatic" "m2-hypermem-416" = "Automatic" "m2-ultramem-416" = "Automatic" + "m3-megamem-64" = "Automatic" + "m3-megamem-128" = "Automatic" + "m3-ultramem-32" = "Automatic" + "m3-ultramem-64" = "Automatic" + "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + } + + # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. + # All 'over provisioned' capacity is to go onto the data disk. + min_total_disk_map = { + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 + "hyperdisk-balanced" = 0 + "hyperdisk-extreme" = 0 } + min_total_disk = local.min_total_disk_map[var.disk_type] + mem_size = lookup(local.mem_size_map, var.machine_type, 320) - hana_log_size_min = min(512, max(64, local.mem_size / 2)) - hana_data_size_min = local.mem_size * 12 / 10 - hana_shared_size_min = min(1024, local.mem_size) + hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) + hana_data_size_min = ceil(local.mem_size * 12 / 10) + hana_shared_size = min(1024, local.mem_size) + hana_usrsap_size = 32 default_boot_size = 30 - hana_log_size = local.hana_log_size_min - hana_data_size = local.hana_data_size_min + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_usrsap_size - local.hana_log_size - local.hana_shared_size ) all_network_tag_items = concat(var.network_tags, ["sap-${local.healthcheck_name}-port"]) network_tags = local.all_network_tag_items - pdhdd_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size - # ensure pd-ssd meets minimum size/performance - pdssd_size = ceil(max(834, local.hana_log_size + local.hana_data_size + local.hana_shared_size_min + 32 + 1)) + # ensure the combined disk meets minimum size/performance + pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_shared_size + local.hana_usrsap_size + 1)) + + temp_shared_disk_type = contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], var.disk_type) ? "pd-balanced" : var.disk_type + temp_usrsap_disk_type = contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], var.disk_type) ? 
"pd-balanced" : var.disk_type + + final_data_disk_type = var.data_disk_type_override == "" ? var.disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? var.disk_type : var.log_disk_type_override + final_shared_disk_type = var.shared_disk_type_override == "" ? local.temp_shared_disk_type : var.shared_disk_type_override + final_usrsap_disk_type = var.usrsap_disk_type_override == "" ? local.temp_usrsap_disk_type : var.usrsap_disk_type_override + + + unified_pd_size = var.unified_disk_size_override == null ? local.pd_size : var.unified_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override + shared_pd_size = var.shared_disk_size_override == null ? local.hana_shared_size : var.shared_disk_size_override + usrsap_pd_size = var.usrsap_disk_size_override == null ? local.hana_usrsap_size : var.usrsap_disk_size_override + backup_pd_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size + + # IOPS + hdx_iops_map = { + "data" = max(10000, local.data_pd_size*2) + "log" = max(10000, local.log_pd_size*2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) + "worker" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) + "backup" = max(10000, 2 * local.backup_pd_size) + } + hdb_iops_map = { + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default + "backup" = var.hyperdisk_balanced_iops_default + } + null_iops_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + "backup" = null + } + iops_map = { + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map + "hyperdisk-balanced" = local.hdb_iops_map + "hyperdisk-extreme" = local.hdx_iops_map + } + + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_shared_iops = var.shared_disk_iops_override == null ? local.iops_map[local.final_shared_disk_type]["shared"] : var.shared_disk_iops_override + final_usrsap_iops = var.usrsap_disk_iops_override == null ? local.iops_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[var.disk_type]["unified"] : var.unified_disk_iops_override + final_backup_iops = var.backup_disk_iops_override == null ? 
local.iops_map[var.backup_disk_type]["backup"] : var.backup_disk_iops_override + + # THROUGHPUT + hdb_throughput_map = { + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_throughput_default + "worker" = var.hyperdisk_balanced_throughput_default + "backup" = var.hyperdisk_balanced_throughput_default + } + null_throughput_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + "backup" = null + } + throughput_map = { + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map + "hyperdisk-balanced" = local.hdb_throughput_map + "hyperdisk-extreme" = local.null_throughput_map + } + + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_shared_throughput = var.shared_disk_throughput_override == null ? local.throughput_map[local.final_shared_disk_type]["shared"] : var.shared_disk_throughput_override + final_usrsap_throughput = var.usrsap_disk_throughput_override == null ? local.throughput_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[var.disk_type]["unified"] : var.unified_disk_throughput_override + final_backup_throughput = var.backup_disk_throughput_override == null ? local.throughput_map[var.backup_disk_type]["backup"] : var.backup_disk_throughput_override sap_vip_solution = "ILB" sap_hc_port = 60000 + var.sap_hana_instance_number @@ -140,6 +260,42 @@ locals { goog-wl-os = local.os_full_name }) : {} + + primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url + worker_startup_url = var.sap_deployment_debug ? replace(var.worker_startup_url, "bash -s", "bash -x -s") : var.worker_startup_url + secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url + mm_startup_url = var.sap_deployment_debug ? replace(var.majority_maker_startup_url, "bash -s", "bash -x -s") : var.majority_maker_startup_url + + # HA Scaleout features + mm_partially_defined = (var.majority_maker_instance_name != "") || (var.majority_maker_machine_type != "") || (var.majority_maker_zone != "") + mm_fully_defined = (var.majority_maker_instance_name != "") && (var.majority_maker_machine_type != "") && (var.majority_maker_zone != "") + mm_zone_split = split("-", var.majority_maker_zone) + mm_region = length(local.mm_zone_split) < 3 ? 
"" : join("-", [local.mm_zone_split[0], local.mm_zone_split[1]]) +} + +data "assert_test" "scaleout_needs_mm" { + test = (local.mm_partially_defined && var.sap_hana_scaleout_nodes > 0) || (!local.mm_partially_defined && var.sap_hana_scaleout_nodes == 0) + throw = "sap_hana_scaleout_nodes and all majority_maker variables must be specified together: majority_maker_instance_name, majority_maker_machine_type, majority_maker_zone" +} + +data "assert_test" "fully_specify_mm" { + test = !local.mm_partially_defined || local.mm_fully_defined + throw = "majority_maker_instance_name, majority_maker_machine_type, and majority_maker_zone must all be specified together" +} + +data "assert_test" "mm_region_check" { + test = !local.mm_fully_defined || local.mm_region == local.region + throw = "Majority maker must be in the same region as the primary and secondary instances" +} + +resource "validation_warning" "mm_zone_warning" { + condition = (var.majority_maker_zone == var.primary_zone) || (var.majority_maker_zone == var.secondary_zone) + summary = "It is recommended that the Majority Maker exist in a separate zone but same region from the primary and secondary instances." +} + +data "assert_test" "no_rhel_with_scaleout" { + test = var.sap_hana_scaleout_nodes == 0 || ! can(regex("rhel", var.linux_image_project)) + throw = "HANA HA Scaleout deployment is currently only supported on SLES operating systems." } ################################################################################ @@ -153,8 +309,21 @@ resource "google_compute_address" "sap_hana_ha_vm_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = count.index == 0 ? var.primary_static_ip : var.secondary_static_ip } +resource "google_compute_address" "sap_hana_ha_worker_vm_ip" { + count = var.sap_hana_scaleout_nodes * 2 + name = (count.index % 2) == 0 ? "${var.primary_instance_name}w${floor(count.index / 2) + 1}-vm-ip" : "${var.secondary_instance_name}w${floor(count.index / 2) + 1}-vm-ip" + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + region = local.region + project = var.project_id + # The worker node IPs are all in one list, alternating between primary and secondary + address = (count.index % 2) == 0 ? ( + length(var.primary_worker_static_ips) > floor(count.index / 2) ? var.primary_worker_static_ips[floor(count.index / 2)] : "") : ( + length(var.secondary_worker_static_ips) > floor(count.index / 2) ? var.secondary_worker_static_ips[floor(count.index / 2)] : "") +} ################################################################################ # Primary Instance @@ -162,9 +331,10 @@ resource "google_compute_address" "sap_hana_ha_vm_ip" { ################################################################################ # disks ################################################################################ -resource "google_compute_disk" "sap_hana_ha_primary_boot_disk" { - name = "${var.primary_instance_name}-boot" - type = "pd-standard" +resource "google_compute_disk" "sap_hana_ha_primary_boot_disks" { + count = var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? 
"${var.primary_instance_name}-boot" : "${var.primary_instance_name}w${count.index}-boot" + type = "pd-balanced" zone = var.primary_zone size = local.default_boot_size project = var.project_id @@ -177,24 +347,74 @@ resource "google_compute_disk" "sap_hana_ha_primary_boot_disk" { ignore_changes = [image] } } -resource "google_compute_disk" "sap_hana_ha_primary_pdssd_disk" { - name = "${var.primary_instance_name}-pdssd" - type = "pd-ssd" +resource "google_compute_disk" "sap_hana_ha_primary_unified_disks" { + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes + 1 : 0 + name = count.index == 0 ? "${var.primary_instance_name}-hana" : "${var.primary_instance_name}w${count.index}-hana" + type = var.disk_type + zone = var.primary_zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops + provisioned_throughput = local.final_unified_throughput +} + +# Split data/log/sap disks +resource "google_compute_disk" "sap_hana_ha_primary_data_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-data" : "${var.primary_instance_name}w${count.index}-data" + type = local.final_data_disk_type + zone = var.primary_zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops + provisioned_throughput = local.final_data_throughput +} + +resource "google_compute_disk" "sap_hana_ha_primary_log_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-log" : "${var.primary_instance_name}w${count.index}-log" + type = local.final_log_disk_type + zone = var.primary_zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops + provisioned_throughput = local.final_log_throughput +} +resource "google_compute_disk" "sap_hana_ha_primary_shared_disk" { + count = var.use_single_shared_data_log_disk ? 0 : 1 + name = "${var.primary_instance_name}-shared" + type = local.final_shared_disk_type zone = var.primary_zone - size = local.pdssd_size + size = local.shared_pd_size project = var.project_id + provisioned_iops = local.final_shared_iops + provisioned_throughput = local.final_shared_throughput +} +resource "google_compute_disk" "sap_hana_ha_primary_usrsap_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-usrsap" : "${var.primary_instance_name}w${count.index}-usrsap" + type = local.final_usrsap_disk_type + zone = var.primary_zone + size = local.usrsap_pd_size + project = var.project_id + provisioned_iops = local.final_usrsap_iops + provisioned_throughput = local.final_usrsap_throughput } resource "google_compute_disk" "sap_hana_ha_primary_backup_disk" { + count = var.include_backup_disk ? 
1 : 0 name = "${var.primary_instance_name}-backup" - type = "pd-standard" + type = var.backup_disk_type zone = var.primary_zone - size = local.pdhdd_size + size = local.backup_pd_size project = var.project_id + provisioned_iops = local.final_backup_iops + provisioned_throughput = local.final_backup_throughput } ################################################################################ # instance ################################################################################ + resource "google_compute_instance" "sap_hana_ha_primary_instance" { name = var.primary_instance_name machine_type = var.machine_type @@ -206,25 +426,182 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { boot_disk { auto_delete = true device_name = "boot" - source = google_compute_disk.sap_hana_ha_primary_boot_disk.self_link + source = google_compute_disk.sap_hana_ha_primary_boot_disks[0].self_link + } + + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_primary_unified_disks[0].name + source = google_compute_disk.sap_hana_ha_primary_unified_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_data_disks[0].name + source = google_compute_disk.sap_hana_ha_primary_data_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_log_disks[0].name + source = google_compute_disk.sap_hana_ha_primary_log_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_shared_disk[0].name + source = google_compute_disk.sap_hana_ha_primary_shared_disk[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_usrsap_disks[0].name + source = google_compute_disk.sap_hana_ha_primary_usrsap_disks[0].self_link + } + } + + dynamic attached_disk { + for_each = var.include_backup_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_primary_backup_disk[0].name + source = google_compute_disk.sap_hana_ha_primary_backup_disk[0].self_link + } + } + can_ip_forward = var.can_ip_forward + + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_hana_ha_vm_ip.0.address + + nic_type = var.nic_type == "" ? null : var.nic_type + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + + tags = local.network_tags + + service_account { + # The default empty service account string will use the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] } - attached_disk { - device_name = google_compute_disk.sap_hana_ha_primary_pdssd_disk.name - source = google_compute_disk.sap_hana_ha_primary_pdssd_disk.self_link + dynamic "reservation_affinity" { + for_each = length(var.primary_reservation_name) > 1 ? 
[1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.primary_reservation_name] + } + } } - attached_disk { - device_name = google_compute_disk.sap_hana_ha_primary_backup_disk.name - source = google_compute_disk.sap_hana_ha_primary_backup_disk.self_link + labels = local.wlm_labels + + metadata = merge( + { + startup-script = local.primary_startup_url + post_deployment_script = var.post_deployment_script + sap_deployment_debug = var.sap_deployment_debug + sap_hana_deployment_bucket = var.sap_hana_deployment_bucket + sap_hana_sid = var.sap_hana_sid + sap_hana_instance_number = var.sap_hana_instance_number + sap_hana_sidadm_password = var.sap_hana_sidadm_password + sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret + # wording on system_password may be inconsitent with DM + sap_hana_system_password = var.sap_hana_system_password + sap_hana_system_password_secret = var.sap_hana_system_password_secret + sap_hana_sidadm_uid = var.sap_hana_sidadm_uid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_vip = var.sap_vip + sap_vip_solution = local.sap_vip_solution + sap_hc_port = local.sap_hc_port + sap_primary_instance = var.primary_instance_name + sap_secondary_instance = var.secondary_instance_name + sap_primary_zone = var.primary_zone + sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" + sap_hana_data_disk_type = local.final_data_disk_type + enable_fast_restart = var.enable_fast_restart + template-type = "TERRAFORM" + }, + local.wlm_metadata + ) + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} + +resource "google_compute_instance" "sap_hana_ha_primary_workers" { + count = var.sap_hana_scaleout_nodes + name = "${var.primary_instance_name}w${count.index + 1}" + machine_type = var.machine_type + zone = var.primary_zone + project = var.project_id + + min_cpu_platform = lookup(local.cpu_platform_map, var.machine_type, "Automatic") + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.sap_hana_ha_primary_boot_disks[count.index+1].self_link } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_primary_unified_disks[count.index+1].name + source = google_compute_disk.sap_hana_ha_primary_unified_disks[count.index+1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_data_disks[count.index+1].name + source = google_compute_disk.sap_hana_ha_primary_data_disks[count.index+1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_log_disks[count.index+1].name + source = google_compute_disk.sap_hana_ha_primary_log_disks[count.index+1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? 
[] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_usrsap_disks[count.index+1].name + source = google_compute_disk.sap_hana_ha_primary_usrsap_disks[count.index+1].self_link + } + } can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri - network_ip = google_compute_address.sap_hana_ha_vm_ip[0].address + # The worker node IPs are all in one list, alternating between primary and secondary + network_ip = google_compute_address.sap_hana_ha_worker_vm_ip[count.index * 2].address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -259,7 +636,7 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { metadata = merge( { - startup-script = var.primary_startup_url + startup-script = local.worker_startup_url post_deployment_script = var.post_deployment_script sap_deployment_debug = var.sap_deployment_debug sap_hana_deployment_bucket = var.sap_hana_deployment_bucket @@ -279,6 +656,12 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { sap_secondary_instance = var.secondary_instance_name sap_primary_zone = var.primary_zone sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" + enable_fast_restart = var.enable_fast_restart template-type = "TERRAFORM" }, local.wlm_metadata @@ -290,15 +673,17 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { } } + ################################################################################ # Secondary Instance ################################################################################ ################################################################################ # disks ################################################################################ -resource "google_compute_disk" "sap_hana_ha_secondary_boot_disk" { - name = "${var.secondary_instance_name}-boot" - type = "pd-standard" +resource "google_compute_disk" "sap_hana_ha_secondary_boot_disks" { + count = var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-boot" : "${var.secondary_instance_name}w${count.index}-boot" + type = "pd-balanced" zone = var.secondary_zone size = local.default_boot_size project = var.project_id @@ -311,19 +696,68 @@ resource "google_compute_disk" "sap_hana_ha_secondary_boot_disk" { ignore_changes = [image] } } -resource "google_compute_disk" "sap_hana_ha_secondary_pdssd_disk" { - name = "${var.secondary_instance_name}-pdssd" - type = "pd-ssd" +resource "google_compute_disk" "sap_hana_ha_secondary_unified_disks" { + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes + 1 : 0 + name = count.index == 0 ? 
"${var.secondary_instance_name}-hana" : "${var.secondary_instance_name}w${count.index}-hana" + type = var.disk_type zone = var.secondary_zone - size = local.pdssd_size + size = local.unified_pd_size project = var.project_id + provisioned_iops = local.final_unified_iops + provisioned_throughput = local.final_unified_throughput } + +# Split data/log/sap disks +resource "google_compute_disk" "sap_hana_ha_secondary_data_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-data" : "${var.secondary_instance_name}w${count.index}-data" + type = local.final_data_disk_type + zone = var.secondary_zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops + provisioned_throughput = local.final_data_throughput +} +resource "google_compute_disk" "sap_hana_ha_secondary_log_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-log" : "${var.secondary_instance_name}w${count.index}-log" + type = local.final_log_disk_type + zone = var.secondary_zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops + provisioned_throughput = local.final_log_throughput +} +resource "google_compute_disk" "sap_hana_ha_secondary_shared_disk" { + count = var.use_single_shared_data_log_disk ? 0 : 1 + name = "${var.secondary_instance_name}-shared" + type = local.final_shared_disk_type + zone = var.secondary_zone + size = local.shared_pd_size + project = var.project_id + provisioned_iops = local.final_shared_iops + provisioned_throughput = local.final_shared_throughput +} +resource "google_compute_disk" "sap_hana_ha_secondary_usrsap_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-usrsap" : "${var.secondary_instance_name}w${count.index}-usrsap" + type = local.final_usrsap_disk_type + zone = var.secondary_zone + size = local.usrsap_pd_size + project = var.project_id + provisioned_iops = local.final_usrsap_iops + provisioned_throughput = local.final_usrsap_throughput +} + resource "google_compute_disk" "sap_hana_ha_secondary_backup_disk" { + count = var.include_backup_disk ? 1 : 0 name = "${var.secondary_instance_name}-backup" - type = "pd-standard" + type = var.backup_disk_type zone = var.secondary_zone - size = local.pdhdd_size + size = local.backup_pd_size project = var.project_id + provisioned_iops = local.final_backup_iops + provisioned_throughput = local.final_backup_throughput } ################################################################################ @@ -340,24 +774,59 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { boot_disk { auto_delete = true device_name = "boot" - source = google_compute_disk.sap_hana_ha_secondary_boot_disk.self_link + source = google_compute_disk.sap_hana_ha_secondary_boot_disks[0].self_link } - attached_disk { - device_name = google_compute_disk.sap_hana_ha_secondary_pdssd_disk.name - source = google_compute_disk.sap_hana_ha_secondary_pdssd_disk.self_link + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? 
[1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_unified_disks[0].name + source = google_compute_disk.sap_hana_ha_secondary_unified_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_data_disks[0].name + source = google_compute_disk.sap_hana_ha_secondary_data_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_log_disks[0].name + source = google_compute_disk.sap_hana_ha_secondary_log_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_shared_disk[0].name + source = google_compute_disk.sap_hana_ha_secondary_shared_disk[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[0].name + source = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[0].self_link + } } - attached_disk { - device_name = google_compute_disk.sap_hana_ha_secondary_backup_disk.name - source = google_compute_disk.sap_hana_ha_secondary_backup_disk.self_link + dynamic attached_disk { + for_each = var.include_backup_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_backup_disk[0].name + source = google_compute_disk.sap_hana_ha_secondary_backup_disk[0].self_link + } } can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri - network_ip = google_compute_address.sap_hana_ha_vm_ip[1].address + network_ip = google_compute_address.sap_hana_ha_vm_ip.1.address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -392,7 +861,7 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { metadata = merge( { - startup-script = var.secondary_startup_url + startup-script = local.secondary_startup_url post_deployment_script = var.post_deployment_script sap_deployment_debug = var.sap_deployment_debug sap_hana_deployment_bucket = var.sap_hana_deployment_bucket @@ -412,6 +881,12 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { sap_secondary_instance = var.secondary_instance_name sap_primary_zone = var.primary_zone sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? 
var.majority_maker_instance_name : "" + enable_fast_restart = var.enable_fast_restart template-type = "TERRAFORM" }, local.wlm_metadata @@ -423,6 +898,127 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { } } +resource "google_compute_instance" "sap_hana_ha_secondary_workers" { + count = var.sap_hana_scaleout_nodes + name = "${var.secondary_instance_name}w${count.index + 1}" + machine_type = var.machine_type + zone = var.secondary_zone + project = var.project_id + + min_cpu_platform = lookup(local.cpu_platform_map, var.machine_type, "Automatic") + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.sap_hana_ha_secondary_boot_disks[count.index+1].self_link + } + + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_unified_disks[count.index+1].name + source = google_compute_disk.sap_hana_ha_secondary_unified_disks[count.index+1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_data_disks[count.index+1].name + source = google_compute_disk.sap_hana_ha_secondary_data_disks[count.index+1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_log_disks[count.index+1].name + source = google_compute_disk.sap_hana_ha_secondary_log_disks[count.index+1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[count.index+1].name + source = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[count.index+1].self_link + } + } + + can_ip_forward = var.can_ip_forward + + network_interface { + subnetwork = local.subnetwork_uri + # The worker node IPs are all in one list, alternating between primary and secondary + network_ip = google_compute_address.sap_hana_ha_worker_vm_ip[count.index * 2 + 1].address + nic_type = var.nic_type == "" ? null : var.nic_type + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + + tags = local.network_tags + + service_account { + # An empty string service account will default to the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + + dynamic "reservation_affinity" { + for_each = length(var.secondary_reservation_name) > 1 ? 
[1] : []
+    content {
+      type = "SPECIFIC_RESERVATION"
+      specific_reservation {
+        key    = "compute.googleapis.com/reservation-name"
+        values = [var.secondary_reservation_name]
+      }
+    }
+  }
+
+  labels = local.wlm_labels
+
+  metadata = merge(
+    {
+      startup-script                  = local.worker_startup_url
+      post_deployment_script          = var.post_deployment_script
+      sap_deployment_debug            = var.sap_deployment_debug
+      sap_hana_deployment_bucket      = var.sap_hana_deployment_bucket
+      sap_hana_sid                    = var.sap_hana_sid
+      sap_hana_instance_number        = var.sap_hana_instance_number
+      sap_hana_sidadm_password        = var.sap_hana_sidadm_password
+      sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret
+      # wording on system_password may be inconsistent with DM
+      sap_hana_system_password        = var.sap_hana_system_password
+      sap_hana_system_password_secret = var.sap_hana_system_password_secret
+      sap_hana_sidadm_uid             = var.sap_hana_sidadm_uid
+      sap_hana_sapsys_gid             = var.sap_hana_sapsys_gid
+      sap_vip                         = var.sap_vip
+      sap_vip_solution                = local.sap_vip_solution
+      sap_hc_port                     = local.sap_hc_port
+      sap_primary_instance            = var.primary_instance_name
+      sap_secondary_instance          = var.secondary_instance_name
+      sap_primary_zone                = var.primary_zone
+      sap_secondary_zone              = var.secondary_zone
+      use_single_shared_data_log_disk = var.use_single_shared_data_log_disk
+      sap_hana_backup_disk            = var.include_backup_disk
+      sap_hana_shared_disk            = !var.use_single_shared_data_log_disk
+      sap_hana_scaleout_nodes         = var.sap_hana_scaleout_nodes
+      majority_maker_instance_name    = local.mm_fully_defined ? var.majority_maker_instance_name : ""
+      enable_fast_restart             = var.enable_fast_restart
+      template-type                   = "TERRAFORM"
+    },
+    local.wlm_metadata
+  )
+
+  lifecycle {
+    # Ignore changes in the instance metadata, since it is modified by the SAP startup script.
+    ignore_changes = [metadata]
+  }
+}
 
 ################################################################################
 # Optional ILB for VIP
@@ -449,11 +1045,13 @@ resource "google_compute_region_backend_service" "sap_hana_ha_loadbalancer" {
   health_checks = [google_compute_health_check.sap_hana_ha_loadbalancer_hc.self_link]
 
   backend {
-    group = google_compute_instance_group.sap_hana_ha_primary_instance_group.self_link
+    group    = google_compute_instance_group.sap_hana_ha_primary_instance_group.self_link
+    failover = false
   }
 
   backend {
-    group = google_compute_instance_group.sap_hana_ha_secondary_instance_group.self_link
+    group    = google_compute_instance_group.sap_hana_ha_secondary_instance_group.self_link
+    failover = true
   }
 
   protocol = "TCP"
@@ -507,6 +1105,114 @@ resource "google_compute_firewall" "sap_hana_ha_vpc_firewall" {
   target_tags   = ["sap-${local.healthcheck_name}-port"]
   allow {
     protocol = "tcp"
-    ports    = [local.sap_hc_port]
+    ports    = ["${local.sap_hc_port}"]
+  }
+}
+
+################################################################################
+# Majority Maker
+################################################################################
+
+resource "google_compute_disk" "sap_majority_maker_boot_disk" {
+  count   = local.mm_fully_defined ? 1 : 0
+  name    = "${var.majority_maker_instance_name}-boot"
+  type    = "pd-balanced"
+  zone    = var.majority_maker_zone
+  size    = local.default_boot_size
+  project = var.project_id
+  image   = local.os_full_name
+  lifecycle {
+    # Ignores newer versions of the OS image. Removing this lifecycle
+    # and re-applying will cause the current disk to be deleted.
+    # All existing data will be lost.
+ ignore_changes = [image] + } +} + +resource "google_compute_address" "sap_hana_majority_maker_vm_ip" { + count = local.mm_fully_defined ? 1 : 0 + name = "${var.majority_maker_instance_name}-ip" + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + region = local.region + project = var.project_id +} + +resource "google_compute_instance" "sap_majority_maker_instance" { + count = local.mm_fully_defined ? 1 : 0 + name = var.majority_maker_instance_name + machine_type = var.majority_maker_machine_type + zone = var.majority_maker_zone + project = var.project_id + + min_cpu_platform = lookup(local.cpu_platform_map, var.majority_maker_machine_type, "Automatic") + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.sap_majority_maker_boot_disk[0].self_link + } + + can_ip_forward = var.can_ip_forward + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_hana_majority_maker_vm_ip.0.address + nic_type = var.nic_type == "" ? null : var.nic_type + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + tags = local.network_tags + service_account { + # The default empty service account string will use the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + + metadata = merge( + { + startup-script = local.mm_startup_url + sap_deployment_debug = var.sap_deployment_debug + primary = var.primary_instance_name + secondary = var.secondary_instance_name + post_deployment_script = var.post_deployment_script + sap_hana_deployment_bucket = var.sap_hana_deployment_bucket + sap_hana_sid = var.sap_hana_sid + sap_hana_instance_number = var.sap_hana_instance_number + sap_hana_sidadm_password = var.sap_hana_sidadm_password + sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret + # wording on system_password may be inconsitent with DM + sap_hana_system_password = var.sap_hana_system_password + sap_hana_system_password_secret = var.sap_hana_system_password_secret + sap_hana_sidadm_uid = var.sap_hana_sidadm_uid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_vip = var.sap_vip + sap_vip_solution = local.sap_vip_solution + sap_hc_port = local.sap_hc_port + sap_primary_instance = var.primary_instance_name + sap_secondary_instance = var.secondary_instance_name + sap_primary_zone = var.primary_zone + sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" + template-type = "TERRAFORM" + }, + local.wlm_metadata + ) + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] } } + + + diff --git a/modules/sap_hana_ha/outputs.tf b/modules/sap_hana_ha/outputs.tf index 77592a92..c920b284 100644 --- a/modules/sap_hana_ha/outputs.tf +++ b/modules/sap_hana_ha/outputs.tf @@ -13,15 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
 */
-
 output "sap_hana_ha_primary_instance_self_link" {
   description = "Self-link for the primary SAP HANA HA instance created."
   value       = google_compute_instance.sap_hana_ha_primary_instance.self_link
 }
+output "sap_hana_ha_primary_worker_self_links" {
+  description = "Self-links for the worker nodes in the primary SAP HANA HA instance."
+  value       = google_compute_instance.sap_hana_ha_primary_workers.*.self_link
+}
 output "sap_hana_ha_secondary_instance_self_link" {
   description = "Self-link for the secondary SAP HANA HA instance created."
   value       = google_compute_instance.sap_hana_ha_secondary_instance.self_link
 }
+output "sap_hana_ha_secondary_worker_self_links" {
+  description = "Self-links for the worker nodes in the secondary SAP HANA HA instance."
+  value       = google_compute_instance.sap_hana_ha_secondary_workers.*.self_link
+}
 output "sap_hana_ha_loadbalander_link" {
   description = "Link to the optional load balancer"
   value       = google_compute_region_backend_service.sap_hana_ha_loadbalancer.*.self_link
diff --git a/modules/sap_hana_ha/variables.tf b/modules/sap_hana_ha/variables.tf
index d29c168c..650b4c81 100644
--- a/modules/sap_hana_ha/variables.tf
+++ b/modules/sap_hana_ha/variables.tf
@@ -13,7 +13,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 variable "project_id" {
   type        = string
   description = "Project id where the instances will be created."
@@ -69,7 +68,11 @@ variable "secondary_instance_name" {
 
 variable "sap_hana_deployment_bucket" {
   type        = string
-  description = "The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed."
+  description = "The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed."
+  validation {
+    condition     = (! (length(regexall( "gs:", var.sap_hana_deployment_bucket)) > 0))
+    error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://."
+  }
   default     = ""
 }
 
@@ -253,9 +256,262 @@ variable "post_deployment_script" {
   default     = ""
 }
 
+variable "nic_type" {
+  type        = string
+  description = "Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking."
+  validation {
+    condition     = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type)
+    error_message = "The nic_type must be either GVNIC or VIRTIO_NET."
+  }
+  default     = ""
+}
+
+variable "disk_type" {
+  type        = string
+  description = "Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details."
+  validation {
+    condition     = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type)
+    error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme."
+  }
+  default     = "pd-ssd"
+}
+
+variable "use_single_shared_data_log_disk" {
+  type        = bool
+  description = "Optional - By default three separate disks for data, logs, and shared will be made. If set to true, one disk will be used instead."
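# A minimal sketch of how this flag is consumed, assuming a hypothetical pair of
# disk resources named "unified" and "data" (the module's real resources are the
# sap_hana_ha_*_unified_disks, *_data_disks, *_log_disks, *_shared_disk and
# *_usrsap_disks shown earlier in this patch). The dynamic "attached_disk" blocks
# in main.tf use a one-element list to enable a block and an empty list to
# suppress it:
#
#   dynamic "attached_disk" {
#     for_each = var.use_single_shared_data_log_disk ? [1] : []   # unified disk only
#     content {
#       device_name = google_compute_disk.unified[0].name
#       source      = google_compute_disk.unified[0].self_link
#     }
#   }
#   dynamic "attached_disk" {
#     for_each = var.use_single_shared_data_log_disk ? [] : [1]   # split data disk only
#     content {
#       device_name = google_compute_disk.data[0].name
#       source      = google_compute_disk.data[0].self_link
#     }
#   }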
+ default = false +} + +variable "include_backup_disk" { + type = bool + description = "Optional - The default is true. If set creates a disk for backups." + default = true +} + +variable "sap_hana_scaleout_nodes" { + type = number + description = "Optional - Specify to add scaleout nodes to both HA instances." + default = 0 +} + +variable "majority_maker_instance_name" { + type = string + description = "Optional - Name to use for the Majority Maker instance. Must be provided if scaleout_nodes > 0." + default = "" +} + +variable "majority_maker_machine_type" { + type = string + description = "Optional - The machine type to use for the Majority Maker instance. Must be provided if scaleout_nodes > 0." + default = "" +} + +variable "majority_maker_zone" { + type = string + description = "Optional - The zone in which the Majority Maker instance will be deployed. Must be provided if scaleout_nodes > 0. It is recommended for this to be different from the zones the primary and secondary instance are deployed in." + default = "" +} + +variable "primary_static_ip" { + type = string + description = "Optional - Defines an internal static IP for the primary VM." + validation { + condition = var.primary_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.primary_static_ip)) + error_message = "The primary_static_ip must be a valid IP address." + } + default = "" +} + +variable "secondary_static_ip" { + type = string + description = "Optional - Defines an internal static IP for the secondary VM." + validation { + condition = var.secondary_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.secondary_static_ip)) + error_message = "The secondary_static_ip must be a valid IP address." + } + default = "" +} + +variable "primary_worker_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the primary worker nodes." + validation { + condition = alltrue([ + for ip in var.primary_worker_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All primary_worker_static_ips must be valid IP addresses." + } + default = [] +} + +variable "secondary_worker_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the secondary worker nodes." + validation { + condition = alltrue([ + for ip in var.secondary_worker_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All secondary_worker_static_ips must be valid IP addresses." + } + default = [] +} + + +variable "backup_disk_type" { + type = string + description = "Optional - The default is pd-balanced, only used if a backup disk is needed." + default = "pd-balanced" + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", "pd-standard"], var.backup_disk_type) + error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-standard, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } +} + +variable "hyperdisk_balanced_iops_default" { + type = number + description = "Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." 
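# A rough sketch of how this default is applied, following the precedence used by
# the scaleout module's locals later in this patch (modules/sap_hana_scaleout/main.tf);
# the HA module's final_*_iops locals are presumably resolved the same way. For
# hyperdisk-balanced disks:
#
#   final_data_iops       = var.data_disk_iops_override == null ? var.hyperdisk_balanced_iops_default : var.data_disk_iops_override
#   final_data_throughput = var.data_disk_throughput_override == null ? var.hyperdisk_balanced_throughput_default : var.data_disk_throughput_override
#
# For pd-ssd and pd-balanced both provisioned_iops and provisioned_throughput stay
# null, since those disk types take no provisioned performance.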
+ default = 3000 +} + +variable "hyperdisk_balanced_throughput_default" { + type = number + description = "Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." + default = 750 +} + +variable "enable_fast_restart" { + type = bool + description = "Optional - The default is true. If set enables HANA Fast Restart." + default = true +} + # -# DO NOT MODIFY unless you know what you are doing +# DO NOT MODIFY unless instructed or aware of the implications of using those settings # + +variable "data_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) + error_message = "The data_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "log_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) + error_message = "The log_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "shared_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the shared disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.shared_disk_type_override) + error_message = "The shared_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "usrsap_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the /usr/sap disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.usrsap_disk_type_override) + error_message = "The usrsap_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "unified_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary disk(s), that is based off of the machine_type." + default = null +} +variable "data_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." + default = null +} +variable "log_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine_type." 
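# The overrides in this block replace a derived default only when they are set;
# an empty string (for types) or null (for sizes) keeps the value computed from
# disk_type and machine_type. A sketch of the fallback, as it appears in the
# modules' locals:
#
#   final_log_disk_type = var.log_disk_type_override == "" ? var.disk_type : var.log_disk_type_override
#   log_pd_size         = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override
#
# For example, disk_type = "pd-ssd" combined with log_disk_type_override =
# "hyperdisk-extreme" moves only the log volume to Hyperdisk Extreme and leaves
# every other disk on pd-ssd.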
+ default = null +} +variable "shared_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine_type." + default = null +} +variable "usrsap_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine_type." + default = null +} +variable "unified_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "shared_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "usrsap_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "backup_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} + +variable "unified_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "shared_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. 
Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "usrsap_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "backup_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} + variable "wlm_deployment_name" { type = string description = "Deployment name to be used for integrating into Work Load Management." @@ -271,17 +527,29 @@ variable "is_work_load_management_deployment" { variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" +} + +variable "worker_startup_url" { + type = string + description = "Startup script to be executed when the worker VM boots, should not be overridden." + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_ha/startup_worker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" description = "DO NOT USE" } +variable "majority_maker_startup_url" { + type = string + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_ha/startup_majority_maker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + description = "DO NOT USE" +} variable "can_ip_forward" { type = bool description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed." default = true } + diff --git a/modules/sap_hana_ha/versions.tf b/modules/sap_hana_ha/versions.tf index aa26f9ac..73afcc7d 100644 --- a/modules/sap_hana_ha/versions.tf +++ b/modules/sap_hana_ha/versions.tf @@ -13,17 +13,17 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - terraform { required_version = ">=0.12.6" required_providers { - google = { - source = "hashicorp/google" - version = ">= 4.0.0, < 6" + google = {} + assert = { + source = "bwoznicki/assert" + version = "0.0.1" + } + validation = { + source = "tlkamp/validation" + version = "1.0.0" } - } - - provider_meta "google" { - module_name = "blueprints/terraform/terraform-google-sap:sap_hana_ha/v1.1.1" } } diff --git a/modules/sap_hana_scaleout/main.tf b/modules/sap_hana_scaleout/main.tf index 586a646c..0f0b142c 100644 --- a/modules/sap_hana_scaleout/main.tf +++ b/modules/sap_hana_scaleout/main.tf @@ -13,13 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - # # Terraform SAP HANA Scaleout for Google Cloud # # -# Version: BUILD.VERSION -# Build Hash: BUILD.HASH +# Version: 2.0.202402230649 +# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 # ################################################################################ @@ -32,7 +31,7 @@ locals { "n1-highmem-96" = 624 "n1-megamem-96" = 1433 "n2-highmem-32" = 256 - "n2-highmem-48" = 386 + "n2-highmem-48" = 384 "n2-highmem-64" = 512 "n2-highmem-80" = 640 "n2-highmem-96" = 768 @@ -44,18 +43,21 @@ locals { "m1-ultramem-40" = 961 "m1-ultramem-80" = 1922 "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5916 - "m2-megamem-416" = 5916 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11832 + "m2-ultramem-416" = 11744 "m3-megamem-64" = 976 "m3-megamem-128" = 1952 "m3-ultramem-32" = 976 "m3-ultramem-64" = 1952 "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 } cpu_platform_map = { - "n1-standard-16" = "Intel Haswell" "n1-highmem-32" = "Intel Broadwell" "n1-highmem-64" = "Intel Broadwell" "n1-highmem-96" = "Intel Skylake" @@ -82,13 +84,100 @@ locals { "m3-ultramem-32" = "Automatic" "m3-ultramem-64" = "Automatic" "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + } + + # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. + # All 'over provisioned' capacity is to go onto the data disk. + min_total_disk_map = { + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 + "hyperdisk-balanced" = 0 + "hyperdisk-extreme" = 0 + } + + min_total_disk = local.min_total_disk_map[var.disk_type] + + mem_size = lookup(local.mem_size_map, var.machine_type, 320) + hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) + hana_data_size_min = ceil(local.mem_size * 12 / 10) + + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_log_size ) + pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + 1)) + + final_data_disk_type = var.data_disk_type_override == "" ? var.disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? var.disk_type : var.log_disk_type_override + + unified_pd_size = var.unified_disk_size_override == null ? ceil(local.pd_size) : var.unified_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? 
local.hana_log_size : var.log_disk_size_override + + # IOPS + hdx_iops_map = { + "data" = max(10000, local.data_pd_size*2) + "log" = max(10000, local.log_pd_size*2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) + "worker" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) + } + hdb_iops_map = { + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default + } + null_iops_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + } + iops_map = { + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map + "hyperdisk-balanced" = local.hdb_iops_map + "hyperdisk-extreme" = local.hdx_iops_map } - mem_size = lookup(local.mem_size_map, var.machine_type, 320) - sap_hana_log_size_min = min(512, max(64, local.mem_size / 2)) - sap_hana_data_size_min = local.mem_size * 12 / 10 - sap_hana_log_size = local.sap_hana_log_size_min - sap_hana_data_size = local.sap_hana_data_size_min + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[var.disk_type]["unified"] : var.unified_disk_iops_override + + # THROUGHPUT + hdb_throughput_map = { + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default + "unified" = var.hyperdisk_balanced_throughput_default + } + null_throughput_map = { + "data" = null + "log" = null + "unified" = null + } + throughput_map = { + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map + "hyperdisk-balanced" = local.hdb_throughput_map + "hyperdisk-extreme" = local.null_throughput_map + } + + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[var.disk_type]["unified"] : var.unified_disk_throughput_override + + primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url + secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url zone_split = split("-", var.zone) region = "${local.zone_split[0]}-${local.zone_split[1]}" @@ -96,10 +185,6 @@ locals { subnetwork_uri = length(local.subnetwork_split) > 1 ? ( "projects/${local.subnetwork_split[0]}/regions/${local.region}/subnetworks/${local.subnetwork_split[1]}") : ( "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}") - - pdssd_size = ceil(max(834, local.sap_hana_log_size + local.sap_hana_data_size + 1)) - primary_startup_url = var.sap_deployment_debug ? 
replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url - secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url } ################################################################################ @@ -109,7 +194,7 @@ resource "google_compute_disk" "sap_hana_scaleout_boot_disks" { # Need a disk for primary, worker nodes, standby nodes count = var.sap_hana_worker_nodes + var.sap_hana_standby_nodes + 1 name = count.index == 0 ? "${var.instance_name}-boot" : "${var.instance_name}w${count.index}-boot" - type = "pd-standard" + type = "pd-balanced" zone = var.zone size = 45 project = var.project_id @@ -123,14 +208,38 @@ resource "google_compute_disk" "sap_hana_scaleout_boot_disks" { } } -resource "google_compute_disk" "sap_hana_scaleout_pd_disks" { +resource "google_compute_disk" "sap_hana_scaleout_disks" { # Need a pd disk for primary, worker nodes - count = var.sap_hana_worker_nodes + 1 - name = format("${var.instance_name}-mnt%05d", count.index + 1) - type = "pd-ssd" + count = var.use_single_data_log_disk ? var.sap_hana_worker_nodes + 1 : 0 + name = format("${var.instance_name}-hana%05d", count.index + 1) + type = var.disk_type zone = var.zone - size = local.pdssd_size + size = local.unified_pd_size project = var.project_id + provisioned_iops = local.final_unified_iops + provisioned_throughput = local.final_unified_throughput +} + +resource "google_compute_disk" "sap_hana_data_disks" { + count = var.use_single_data_log_disk ? 0 : var.sap_hana_worker_nodes + 1 + name = format("${var.instance_name}-data%05d", count.index + 1) + type = local.final_data_disk_type + zone = var.zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops + provisioned_throughput = local.final_data_throughput +} + +resource "google_compute_disk" "sap_hana_log_disks" { + count = var.use_single_data_log_disk ? 0 : var.sap_hana_worker_nodes + 1 + name = format("${var.instance_name}-log%05d", count.index + 1) + type = local.final_log_disk_type + zone = var.zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops + provisioned_throughput = local.final_log_throughput } ################################################################################ @@ -143,6 +252,7 @@ resource "google_compute_address" "sap_hana_vm_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = var.vm_static_ip } resource "google_compute_address" "sap_hana_worker_ip" { count = var.sap_hana_worker_nodes @@ -151,6 +261,7 @@ resource "google_compute_address" "sap_hana_worker_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = length(var.worker_static_ips) > count.index ? var.worker_static_ips[count.index] : "" } resource "google_compute_address" "sap_hana_standby_ip" { count = var.sap_hana_standby_nodes @@ -159,6 +270,7 @@ resource "google_compute_address" "sap_hana_standby_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = length(var.standby_static_ips) > count.index ? 
var.standby_static_ips[count.index] : "" } ################################################################################ @@ -179,17 +291,34 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { source = google_compute_disk.sap_hana_scaleout_boot_disks[0].self_link } - attached_disk { - # we only attach the PDs to the primary and workers - device_name = google_compute_disk.sap_hana_scaleout_pd_disks[0].name - source = google_compute_disk.sap_hana_scaleout_pd_disks[0].self_link + dynamic attached_disk { + for_each = var.use_single_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_scaleout_disks[0].name + source = google_compute_disk.sap_hana_scaleout_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_data_disks[0].name + source = google_compute_disk.sap_hana_data_disks[0].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_log_disks[0].name + source = google_compute_disk.sap_hana_log_disks[0].self_link + } } - can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_vm_ip.address + nic_type = var.nic_type == "" ? null : var.nic_type + # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -230,7 +359,6 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { sap_hana_instance_number = var.sap_hana_instance_number sap_hana_sidadm_password = var.sap_hana_sidadm_password sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret - # wording on system_password may be inconsitent with DM sap_hana_system_password = var.sap_hana_system_password sap_hana_system_password_secret = var.sap_hana_system_password_secret sap_hana_sidadm_uid = var.sap_hana_sidadm_uid @@ -239,6 +367,8 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { sap_hana_standby_nodes = var.sap_hana_standby_nodes sap_hana_shared_nfs = var.sap_hana_shared_nfs sap_hana_backup_nfs = var.sap_hana_backup_nfs + use_single_data_log_disk = var.use_single_data_log_disk + sap_hana_data_disk_type = local.final_data_disk_type template-type = "TERRAFORM" } @@ -264,17 +394,33 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" { source = google_compute_disk.sap_hana_scaleout_boot_disks[count.index + 1].self_link } - attached_disk { - # we only attach the PDs to the primary and workers - device_name = google_compute_disk.sap_hana_scaleout_pd_disks[count.index + 1].name - source = google_compute_disk.sap_hana_scaleout_pd_disks[count.index + 1].self_link + dynamic attached_disk { + for_each = var.use_single_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_scaleout_disks[count.index + 1].name + source = google_compute_disk.sap_hana_scaleout_disks[count.index + 1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_data_disks[count.index + 1].name + source = google_compute_disk.sap_hana_data_disks[count.index + 1].self_link + } + } + dynamic attached_disk { + for_each = var.use_single_data_log_disk ? 
[] : [1] + content { + device_name = google_compute_disk.sap_hana_log_disks[count.index + 1].name + source = google_compute_disk.sap_hana_log_disks[count.index + 1].self_link + } } - can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_worker_ip[count.index].address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -324,6 +470,7 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" { sap_hana_backup_nfs = var.sap_hana_backup_nfs sap_hana_worker_nodes = var.sap_hana_worker_nodes sap_hana_standby_nodes = var.sap_hana_standby_nodes + use_single_data_log_disk = var.use_single_data_log_disk template-type = "TERRAFORM" } @@ -359,6 +506,7 @@ resource "google_compute_instance" "sap_hana_scaleout_standby_instances" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_standby_ip[count.index].address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { diff --git a/modules/sap_hana_scaleout/outputs.tf b/modules/sap_hana_scaleout/outputs.tf index d5a139cd..e832d6a3 100644 --- a/modules/sap_hana_scaleout/outputs.tf +++ b/modules/sap_hana_scaleout/outputs.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - output "sap_hana_primary_self_link" { description = "Self-link for the primary SAP HANA Scalout instance created." value = google_compute_instance.sap_hana_scaleout_primary_instance.self_link diff --git a/modules/sap_hana_scaleout/variables.tf b/modules/sap_hana_scaleout/variables.tf index d8c55f23..8c212bde 100644 --- a/modules/sap_hana_scaleout/variables.tf +++ b/modules/sap_hana_scaleout/variables.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - variable "project_id" { type = string description = "Project id where the instances will be created." @@ -64,7 +63,11 @@ variable "sap_hana_sid" { variable "sap_hana_deployment_bucket" { type = string - description = "The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." + description = "The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." + validation { + condition = (! (length(regexall( "gs:", var.sap_hana_deployment_bucket)) > 0)) + error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://." + } default = "" } @@ -143,11 +146,19 @@ variable "sap_hana_standby_nodes" { variable "sap_hana_shared_nfs" { type = string + validation { + condition = var.sap_hana_shared_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_shared_nfs)) + error_message = "The sap_hana_shared_nfs must be an IP address followed by ':/' then some name." 
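# A short example of values this validation accepts, with placeholder addresses
# and share names; it expects a Filestore-style export of the form
# <ip-address>:/<share>:
#
#   sap_hana_shared_nfs = "10.11.12.2:/hana_shared"
#   sap_hana_backup_nfs = "10.11.12.2:/hana_backup"
#
# A bare hostname or a gs:// path will not match the regex and fails validation.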
+ } description = "Google Filestore share for /hana/shared" } variable "sap_hana_backup_nfs" { type = string + validation { + condition = var.sap_hana_backup_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_backup_nfs)) + error_message = "The sap_hana_backup_nfs must be an IP address followed by ':/' then some name." + } description = "Google Filestore share for /hanabackup" } @@ -157,6 +168,12 @@ variable "sap_hana_sidadm_uid" { default = 900 } +variable "sap_hana_sapsys_gid" { + type = number + description = "The Linux GID of the SAPSYS group. By default this is set to 79" + default = 79 +} + variable "network_tags" { type = list(string) description = "OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes." @@ -206,18 +223,153 @@ variable "post_deployment_script" { default = "" } +variable "nic_type" { + type = string + description = "Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking." + validation { + condition = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type) + error_message = "The nic_type must be either GVNIC or VIRTIO_NET." + } + default = "" +} + +variable "disk_type" { + type = string + description = "Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) + error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "pd-ssd" +} + +variable "use_single_data_log_disk" { + type = bool + description = "Optional - By default two separate disk for data and logs will be made. If set to true, one disk will be used instead." + default = false +} + +variable "hyperdisk_balanced_iops_default" { + type = number + description = "Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot disk)." + default = 3000 +} + +variable "hyperdisk_balanced_throughput_default" { + type = number + description = "Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot disk)." + default = 750 +} + +variable "vm_static_ip" { + type = string + description = "Optional - Defines an internal static IP for the VM." + validation { + condition = var.vm_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.vm_static_ip)) + error_message = "The vm_static_ip must be a valid IP address." + } + default = "" +} + +variable "worker_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the worker nodes." + validation { + condition = alltrue([ + for ip in var.worker_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All worker_static_ips must be valid IP addresses." 
+ } + default = [] +} + +variable "standby_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the standby nodes." + validation { + condition = alltrue([ + for ip in var.standby_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All standby_static_ips must be valid IP addresses." + } + default = [] +} + # -# DO NOT MODIFY unless you know what you are doing +# DO NOT MODIFY unless instructed or aware of the implications of using those settings # +variable "data_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) + error_message = "The data_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "log_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) + error_message = "The log_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "unified_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary's unified disk, that is based off of the machine_type." + default = null +} +variable "data_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." + default = null +} +variable "log_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine_type." + default = null +} +variable "unified_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." 
+ default = null +} +variable "unified_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." - default = "curl -s BUILD.TERRA_SH_URL/sap_hana_scaleout/startup.sh | bash -s BUILD.TERRA_SH_URL" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_scaleout/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s BUILD.TERRA_SH_URL/sap_hana_scaleout/startup_secondary.sh | bash -s BUILD.TERRA_SH_URL" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_scaleout/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" description = "DO NOT USE" } @@ -226,3 +378,4 @@ variable "can_ip_forward" { description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed." default = true } + diff --git a/modules/sap_hana_scaleout/versions.tf b/modules/sap_hana_scaleout/versions.tf index f10818a8..f543e7b0 100644 --- a/modules/sap_hana_scaleout/versions.tf +++ b/modules/sap_hana_scaleout/versions.tf @@ -13,17 +13,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - terraform { required_version = ">=0.12.6" required_providers { - google = { - source = "hashicorp/google" - version = ">= 4.0.0, < 6" - } - } - - provider_meta "google" { - module_name = "blueprints/terraform/terraform-google-sap:sap_hana_scaleout/v1.1.1" + google = {} } } diff --git a/modules/sap_nw/main.tf b/modules/sap_nw/main.tf new file mode 100644 index 00000000..54448ebc --- /dev/null +++ b/modules/sap_nw/main.tf @@ -0,0 +1,185 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +# +# Terraform SAP NW for Google Cloud +# +# Version: 2.0.202402230649 +# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 +# + +################################################################################ +# Local variables +################################################################################ +locals { + zone_split = split("-", var.zone) + region = "${local.zone_split[0]}-${local.zone_split[1]}" + subnetwork_split = split("/", var.subnetwork) + subnetwork_uri = length(local.subnetwork_split) > 1 ? ( + "projects/${local.subnetwork_split[0]}/regions/${local.region}/subnetworks/${local.subnetwork_split[1]}") : ( + "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}") + + + cpu_map = { + "n1-highmem-96" : "Intel Skylake", + "n1-megamem-96" : "Intel Skylake", + } + primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url +} + +################################################################################ +# disks +################################################################################ +resource "google_compute_disk" "sap_nw_boot_disk" { + name = "${var.instance_name}-boot" + type = "pd-balanced" + zone = var.zone + size = 30 # GB + project = var.project_id + image = "${var.linux_image_project}/${var.linux_image}" + + lifecycle { + # Ignores newer versions of the OS image. Removing this lifecycle + # and re-applying will cause the current disk to be deleted. + # All existing data will be lost. + ignore_changes = [image] + } +} + +resource "google_compute_disk" "sap_nw_usrsap_disk" { + count = var.usr_sap_size > 0 ? 1 : 0 + name = "${var.instance_name}-usrsap" + type = "pd-balanced" + zone = var.zone + size = var.usr_sap_size + project = var.project_id +} + +resource "google_compute_disk" "sap_nw_swap_disk" { + count = var.swap_size > 0 ? 1 : 0 + name = "${var.instance_name}-swap" + type = "pd-balanced" + zone = var.zone + size = var.swap_size + project = var.project_id +} + +resource "google_compute_disk" "sap_nw_sapmnt_disk" { + count = var.sap_mnt_size > 0 ? 1 : 0 + name = "${var.instance_name}-sapmnt" + type = "pd-balanced" + size = var.sap_mnt_size + zone = var.zone + project = var.project_id +} + +################################################################################ +# VIPs +################################################################################ +resource "google_compute_address" "sap_nw_vm_ip" { + name = var.instance_name + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + region = local.region + project = var.project_id +} +################################################################################ +# instances +################################################################################ +resource "google_compute_instance" "sap_nw_instance" { + name = var.instance_name + machine_type = var.machine_type + zone = var.zone + project = var.project_id + min_cpu_platform = lookup(local.cpu_map, var.machine_type, "Automatic") + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.sap_nw_boot_disk.self_link + } + + dynamic "attached_disk" { + for_each = var.usr_sap_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_nw_usrsap_disk[0].name + source = google_compute_disk.sap_nw_usrsap_disk[0].self_link + } + } + + dynamic "attached_disk" { + for_each = var.sap_mnt_size > 0 ? 
[1] : [] + content { + device_name = google_compute_disk.sap_nw_sapmnt_disk[0].name + source = google_compute_disk.sap_nw_sapmnt_disk[0].self_link + } + } + + dynamic "attached_disk" { + for_each = var.swap_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_nw_swap_disk[0].name + source = google_compute_disk.sap_nw_swap_disk[0].self_link + } + } + + can_ip_forward = var.can_ip_forward + + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_nw_vm_ip.address + + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + + tags = var.network_tags + + service_account { + # An empty string service account will default to the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + + dynamic "reservation_affinity" { + for_each = length(var.reservation_name) > 1 ? [1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.reservation_name] + } + } + } + + metadata = { + startup-script = local.primary_startup_url + post_deployment_script = var.post_deployment_script + sap_deployment_debug = var.sap_deployment_debug + template-type = "TERRAFORM" + } + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} diff --git a/modules/sap_nw/outputs.tf b/modules/sap_nw/outputs.tf new file mode 100644 index 00000000..4a939a43 --- /dev/null +++ b/modules/sap_nw/outputs.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +output "sap_nw_self_link" { + description = "SAP NW self-link for instance created" + value = google_compute_instance.sap_nw_instance.self_link +} diff --git a/modules/sap_nw/variables.tf b/modules/sap_nw/variables.tf new file mode 100644 index 00000000..b624b140 --- /dev/null +++ b/modules/sap_nw/variables.tf @@ -0,0 +1,147 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +variable "project_id" { + type = string + description = "Project id where the instances will be created." +} + +variable "zone" { + type = string + description = "Zone where the instances will be created." 
+}
+
+variable "machine_type" {
+  type        = string
+  description = "Machine type for the instances."
+}
+
+variable "subnetwork" {
+  type        = string
+  description = "The sub network to deploy the instance in."
+}
+
+variable "linux_image" {
+  type        = string
+  description = "Linux image name to use."
+}
+
+variable "linux_image_project" {
+  type        = string
+  description = "The project which the Linux image belongs to."
+}
+
+variable "instance_name" {
+  type        = string
+  description = "Hostname of the GCE instance."
+  validation {
+    condition     = can(regex("^[a-z0-9\\-]+$", var.instance_name))
+    error_message = "The instance_name must consist of lowercase letters (a-z), numbers, and hyphens."
+  }
+}
+
+variable "usr_sap_size" {
+  type        = number
+  description = "Size of /usr/sap in GB"
+  default     = 8
+  validation {
+    condition     = var.usr_sap_size >= 8
+    error_message = "Size of /usr/sap must be at least 8 GB."
+  }
+}
+
+variable "sap_mnt_size" {
+  type        = number
+  description = "Size of /sapmnt in GB"
+  default     = 8
+  validation {
+    condition     = var.sap_mnt_size >= 8
+    error_message = "Size of /sapmnt must be at least 8 GB."
+  }
+}
+
+variable "swap_size" {
+  type        = number
+  description = "Size in GB of swap volume"
+  default     = 8
+  validation {
+    condition     = var.swap_size >= 8
+    error_message = "Size of swap must be at least 8 GB."
+  }
+}
+
+variable "network_tags" {
+  type        = list(string)
+  description = "OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes."
+  default     = []
+}
+
+variable "public_ip" {
+  type        = bool
+  description = "OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network NAT and tags in place, there will be no route to the internet and thus the installation will fail."
+  default     = true
+}
+
+variable "service_account" {
+  type        = string
+  description = "OPTIONAL - Ability to define a custom service account instead of using the default project service account."
+  default     = ""
+}
+
+variable "sap_deployment_debug" {
+  type        = bool
+  description = "OPTIONAL - If this value is set to true, the deployment will generate verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging."
+  default     = false
+}
+
+variable "reservation_name" {
+  type        = string
+  description = <<-EOT
+  Use a reservation specified by RESERVATION_NAME.
+  By default ANY_RESERVATION is used when this variable is empty.
+  In order for a reservation to be used it must be created with the
+  "Select specific reservation" selected (specificReservationRequired set to true)
+  Be sure to create your reservation with the correct Min CPU Platform for the
+  following instance types:
+  n1-highmem-32 : Intel Broadwell
+  n1-highmem-64 : Intel Broadwell
+  n1-highmem-96 : Intel Skylake
+  n1-megamem-96 : Intel Skylake
+  m1-megamem-96 : Intel Skylake
+  All other instance types can have automatic Min CPU Platform
+  EOT
+  default     = ""
+}
+
+variable "post_deployment_script" {
+  type        = string
+  description = "OPTIONAL - gs:// or https:// location of a script to execute on the created VMs post deployment."
+  default     = ""
+}
+
+#
+# DO NOT MODIFY unless you know what you are doing
+#
+variable "primary_startup_url" {
+  type        = string
+  description = "Startup script to be executed when the VM boots, should not be overridden."
+ default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_nw/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" +} + +variable "can_ip_forward" { + type = bool + description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed." + default = true +} diff --git a/modules/sap_nw/versions.tf b/modules/sap_nw/versions.tf new file mode 100644 index 00000000..f543e7b0 --- /dev/null +++ b/modules/sap_nw/versions.tf @@ -0,0 +1,21 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +terraform { + required_version = ">=0.12.6" + required_providers { + google = {} + } +} diff --git a/modules/sap_nw_ha/main.tf b/modules/sap_nw_ha/main.tf new file mode 100644 index 00000000..66e44460 --- /dev/null +++ b/modules/sap_nw_ha/main.tf @@ -0,0 +1,425 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +# +# Terraform SAP NW HA for Google Cloud +# +# Version: 2.0.202402230649 +# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 +# + +################################################################################ +# Local variables +################################################################################ +locals { + primary_region = regex("[a-z]*-[a-z1-9]*", var.sap_primary_zone) + secondary_region = regex("[a-z]*-[a-z1-9]*", var.sap_secondary_zone) + region = local.primary_region + subnetwork_split = split("/", var.subnetwork) + split_network = split("/", var.network) + is_vpc_network = length(local.split_network) > 1 + ascs = var.sap_nw_abap == true ? "A" : "" + + sid = lower(var.sap_sid) + + hc_firewall_rule_name = var.hc_firewall_rule_name == "" ? "${local.sid}-hc-allow" : var.hc_firewall_rule_name + hc_network_tag = length(var.hc_network_tag) == 0 ? ["${local.hc_firewall_rule_name}"] : var.hc_network_tag + + sap_scs_instance_number = var.sap_scs_instance_number == "" ? "00" : var.sap_scs_instance_number + scs_inst_group_name = var.scs_inst_group_name == "" ? "${local.sid}-scs-ig" : var.scs_inst_group_name + scs_hc_name = var.scs_hc_name == "" ? "${local.sid}-scs-hc" : var.scs_hc_name + scs_hc_port = var.scs_hc_port == "" ? "600${local.sap_scs_instance_number}" : var.scs_hc_port + scs_vip_name = var.scs_vip_name == "" ? "${local.sid}-scs-vip" : var.scs_vip_name + scs_vip_address = var.scs_vip_address == "" ? 
"" : var.scs_vip_address + scs_backend_svc_name = var.scs_backend_svc_name == "" ? "${local.sid}-scs-backend-svc" : var.scs_backend_svc_name + scs_forw_rule_name = var.scs_forw_rule_name == "" ? "${local.sid}-scs-fwd-rule" : var.scs_forw_rule_name + + sap_ers_instance_number = var.sap_ers_instance_number == "" ? "10" : var.sap_ers_instance_number + ers_inst_group_name = var.ers_inst_group_name == "" ? "${local.sid}-ers-ig" : var.ers_inst_group_name + ers_hc_name = var.ers_hc_name == "" ? "${local.sid}-ers-hc" : var.ers_hc_name + ers_hc_port = var.ers_hc_port == "" ? "600${local.sap_ers_instance_number}" : var.ers_hc_port + ers_vip_name = var.ers_vip_name == "" ? "${local.sid}-ers-vip" : var.ers_vip_name + ers_vip_address = var.ers_vip_address == "" ? "" : var.ers_vip_address + ers_backend_svc_name = var.ers_backend_svc_name == "" ? "${local.sid}-ers-backend-svc" : var.ers_backend_svc_name + ers_forw_rule_name = var.ers_forw_rule_name == "" ? "${local.sid}-ers-fwd-rule" : var.ers_forw_rule_name + + pacemaker_cluster_name = var.pacemaker_cluster_name == "" ? "${local.sid}-cluster" : var.pacemaker_cluster_name + subnetwork_uri = length(local.subnetwork_split) > 1 ? ( + "projects/${local.subnetwork_split[0]}/regions/${local.region}/subnetworks/${local.subnetwork_split[1]}") : ( + "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}") + + primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url + secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url +} + +################################################################################ +# disks +################################################################################ +resource "google_compute_disk" "nw_boot_disks" { + count = 2 + name = count.index == 0 ? "${var.sap_primary_instance}-boot" : "${var.sap_secondary_instance}-boot" + type = "pd-balanced" + zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone + size = 30 + image = "${var.linux_image_project}/${var.linux_image}" + project = var.project_id + + lifecycle { + # Ignores newer versions of the OS image. Removing this lifecycle + # and re-applying will cause the current disk to be deleted. + # All existing data will be lost. + ignore_changes = [image] + } +} + +resource "google_compute_disk" "nw_usr_sap_disks" { + count = 2 + name = count.index == 0 ? "${var.sap_primary_instance}-usrsap" : "${var.sap_secondary_instance}-usrsap" + type = "pd-balanced" + zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone + size = var.usr_sap_size + project = var.project_id +} + +resource "google_compute_disk" "nw_sapmnt_disks" { + count = 2 + name = count.index == 0 ? "${var.sap_primary_instance}-sapmnt" : "${var.sap_secondary_instance}-sapmnt" + type = "pd-balanced" + zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone + size = var.sap_mnt_size + project = var.project_id +} + +resource "google_compute_disk" "nw_swap_disks" { + count = var.swap_size > 0 ? 2 : 0 + name = count.index == 0 ? "${var.sap_primary_instance}-swap" : "${var.sap_secondary_instance}-swap" + type = "pd-balanced" + zone = count.index == 0 ? 
var.sap_primary_zone : var.sap_secondary_zone + size = var.swap_size + project = var.project_id +} + +################################################################################ +# VM VIPs +################################################################################ + +resource "google_compute_address" "sap_nw_vm_ip" { + count = 2 + name = count.index == 0 ? "${var.sap_primary_instance}-ip" : "${var.sap_secondary_instance}-ip" + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + region = local.region + project = var.project_id +} + +################################################################################ +# instances +################################################################################ +resource "google_compute_instance" "scs_instance" { + name = var.sap_primary_instance + machine_type = var.machine_type + zone = var.sap_primary_zone + project = var.project_id + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.nw_boot_disks[0].self_link + } + + attached_disk { + device_name = google_compute_disk.nw_usr_sap_disks[0].name + source = google_compute_disk.nw_usr_sap_disks[0].self_link + } + attached_disk { + device_name = google_compute_disk.nw_sapmnt_disks[0].name + source = google_compute_disk.nw_sapmnt_disks[0].self_link + } + dynamic "attached_disk" { + for_each = var.swap_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.nw_swap_disks[0].name + source = google_compute_disk.nw_swap_disks[0].self_link + } + } + + can_ip_forward = var.can_ip_forward + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_nw_vm_ip.0.address + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + tags = flatten([var.network_tags, local.hc_network_tag]) + service_account { + # An empty string service account will default to the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + dynamic "reservation_affinity" { + for_each = length(var.primary_reservation_name) > 1 ? [1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.primary_reservation_name] + } + } + } + metadata = { + startup-script = local.primary_startup_url + + # SCS settings + sap_primary_instance = var.sap_primary_instance + sap_primary_zone = var.sap_primary_zone + scs_hc_port = local.scs_hc_port + scs_vip_address = google_compute_address.nw_vips.0.address + scs_vip_name = local.scs_vip_name + + # ERS settings + sap_secondary_instance = var.sap_secondary_instance + sap_secondary_zone = var.sap_secondary_zone + ers_hc_port = local.ers_hc_port + ers_vip_address = google_compute_address.nw_vips.1.address + ers_vip_name = local.ers_vip_name + + # File system settings + nfs_path = var.nfs_path + + # SAP system settings + sap_sid = upper(var.sap_sid) + sap_scs_instance_number = local.sap_scs_instance_number + sap_ers_instance_number = local.sap_ers_instance_number + sap_ascs = local.ascs + + # Pacemaker settings + pacemaker_cluster_name = local.pacemaker_cluster_name + + # Other + sap_deployment_debug = var.sap_deployment_debug ? 
"True" : "False" + post_deployment_script = var.post_deployment_script + template-type = "TERRAFORM" + } + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} + +resource "google_compute_instance" "ers_instance" { + name = var.sap_secondary_instance + machine_type = var.machine_type + zone = var.sap_secondary_zone + project = var.project_id + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.nw_boot_disks[1].self_link + } + + attached_disk { + device_name = google_compute_disk.nw_usr_sap_disks[1].name + source = google_compute_disk.nw_usr_sap_disks[1].self_link + } + attached_disk { + device_name = google_compute_disk.nw_sapmnt_disks[1].name + source = google_compute_disk.nw_sapmnt_disks[1].self_link + } + dynamic "attached_disk" { + for_each = var.swap_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.nw_swap_disks[1].name + source = google_compute_disk.nw_swap_disks[1].self_link + } + } + + can_ip_forward = var.can_ip_forward + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_nw_vm_ip.1.address + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + tags = flatten([var.network_tags, local.hc_network_tag]) + service_account { + # An empty string service account will default to the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + dynamic "reservation_affinity" { + for_each = length(var.secondary_reservation_name) > 1 ? [1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.secondary_reservation_name] + } + } + } + metadata = { + startup-script = local.secondary_startup_url + + # SCS settings + sap_primary_instance = var.sap_primary_instance + sap_primary_zone = var.sap_primary_zone + scs_hc_port = local.scs_hc_port + scs_vip_address = google_compute_address.nw_vips.0.address + scs_vip_name = local.scs_vip_name + + # ERS settings + sap_secondary_instance = var.sap_secondary_instance + sap_secondary_zone = var.sap_secondary_zone + ers_hc_port = local.ers_hc_port + ers_vip_address = google_compute_address.nw_vips.1.address + ers_vip_name = local.ers_vip_name + + # File system settings + nfs_path = var.nfs_path + + # SAP system settings + sap_sid = upper(var.sap_sid) + sap_scs_instance_number = local.sap_scs_instance_number + sap_ers_instance_number = local.sap_ers_instance_number + sap_ascs = local.ascs + + # Pacemaker settings + pacemaker_cluster_name = local.pacemaker_cluster_name + + # Other + sap_deployment_debug = var.sap_deployment_debug ? "True" : "False" + post_deployment_script = var.post_deployment_script + template-type = "TERRAFORM" + } + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} +################################################################################ +# NW VIPs +################################################################################ +resource "google_compute_address" "nw_vips" { + count = 2 + name = count.index == 0 ? 
local.scs_vip_name : local.ers_vip_name + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + address = count.index == 0 ? local.scs_vip_address : local.ers_vip_address + region = count.index == 0 ? local.primary_region : local.secondary_region + project = var.project_id +} + +################################################################################ +# IGs +################################################################################ +resource "google_compute_instance_group" "nw_instance_groups" { + count = 2 + name = count.index == 0 ? local.scs_inst_group_name : local.ers_inst_group_name + instances = count.index == 0 ? google_compute_instance.scs_instance.*.self_link : google_compute_instance.ers_instance.*.self_link + zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone + project = var.project_id +} + +################################################################################ +# Health Checks +################################################################################ +resource "google_compute_health_check" "nw_hc" { + count = 2 + name = count.index == 0 ? local.scs_hc_name : local.ers_hc_name + timeout_sec = 10 + check_interval_sec = 10 + healthy_threshold = 2 + unhealthy_threshold = 2 + project = var.project_id + + tcp_health_check { + port = count.index == 0 ? local.scs_hc_port : local.ers_hc_port + } +} + +################################################################################ +# Firewall rule for the Health Checks +################################################################################ +resource "google_compute_firewall" "nw_hc_firewall_rule" { + name = local.hc_firewall_rule_name + count = local.is_vpc_network ? 0 : 1 + network = var.network + direction = "INGRESS" + source_ranges = ["35.191.0.0/16", "130.211.0.0/22"] + target_tags = local.hc_network_tag + project = var.project_id + + allow { + protocol = "tcp" + ports = [local.scs_hc_port, local.ers_hc_port] + } +} + +################################################################################ +# Backend services +################################################################################ +resource "google_compute_region_backend_service" "nw_regional_backend_services" { + count = 2 + name = count.index == 0 ? local.scs_backend_svc_name : local.ers_backend_svc_name + region = local.region + load_balancing_scheme = "INTERNAL" + health_checks = [element(google_compute_health_check.nw_hc.*.id, count.index)] + project = var.project_id + + failover_policy { + disable_connection_drain_on_failover = true + drop_traffic_if_unhealthy = true + failover_ratio = 1 + } + backend { + group = element(google_compute_instance_group.nw_instance_groups.*.id, count.index) + failover = false + } + backend { + group = element(google_compute_instance_group.nw_instance_groups.*.id, 1 - count.index) + failover = true + } +} + +################################################################################ +# Forwarding Rules +################################################################################ +resource "google_compute_forwarding_rule" "nw_forwarding_rules" { + count = 2 + name = count.index == 0 ? 
local.scs_forw_rule_name : local.ers_forw_rule_name + ip_address = element(google_compute_address.nw_vips.*.address, count.index) + region = local.region + load_balancing_scheme = "INTERNAL" + backend_service = element(google_compute_region_backend_service.nw_regional_backend_services.*.id, count.index) + all_ports = true + subnetwork = local.subnetwork_uri + project = var.project_id +} diff --git a/modules/sap_nw_ha/outputs.tf b/modules/sap_nw_ha/outputs.tf new file mode 100644 index 00000000..5058da33 --- /dev/null +++ b/modules/sap_nw_ha/outputs.tf @@ -0,0 +1,47 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +output "scs_instance" { + description = "SCS instance" + value = google_compute_instance.scs_instance.self_link +} +output "ers_instance" { + description = "ERS instance" + value = google_compute_instance.ers_instance.self_link +} +output "nw_vips" { + description = "NW virtual IPs" + value = google_compute_address.nw_vips.*.self_link +} +output "nw_instance_groups" { + description = "NW Instance Groups" + value = google_compute_instance_group.nw_instance_groups.*.self_link +} +output "nw_hc" { + description = "Health Checks" + value = google_compute_health_check.nw_hc.*.self_link +} +output "nw_hc_firewall" { + description = "Firewall rule for the Health Checks" + value = google_compute_firewall.nw_hc_firewall_rule.*.self_link +} +output "nw_regional_backend_services" { + description = "Backend Services" + value = google_compute_region_backend_service.nw_regional_backend_services.*.self_link +} +output "nw_forwarding_rules" { + description = "Forwarding rules" + value = google_compute_forwarding_rule.nw_forwarding_rules.*.self_link +} diff --git a/modules/sap_nw_ha/variables.tf b/modules/sap_nw_ha/variables.tf new file mode 100644 index 00000000..6f94c754 --- /dev/null +++ b/modules/sap_nw_ha/variables.tf @@ -0,0 +1,352 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +############################################################################## +## MANDATORY SETTINGS +############################################################################## +# +# General settings +# +variable "project_id" { + type = string + description = "Project id where the instances will be created" +} +variable "machine_type" { + type = string + description = "Machine type for the instances" +} +variable "network" { + type = string + description = "Network for the instances" +} +variable "subnetwork" { + type = string + description = "Subnetwork for the instances" +} +variable "linux_image" { + type = string + description = "Linux image name" +} +variable "linux_image_project" { + type = string + description = "Linux image project" +} +# +# SCS settings +# +variable "sap_primary_instance" { + type = string + description = "Name of first instance (initial SCS location)" + validation { + condition = can(regex("^[a-z0-9\\-]+$", var.sap_primary_instance)) && length(var.sap_primary_instance) <= 13 + error_message = "The sap_primary_instance must consist of lowercase letters (a-z), numbers, and hyphens and be less than 14 characters long." + } +} +variable "sap_primary_zone" { + type = string + description = "Zone where the first instance will be created" +} +# +# ERS settings +# +variable "sap_secondary_instance" { + type = string + description = "Name of second instance (initial ERS location)" + validation { + condition = can(regex("^[a-z0-9\\-]+$", var.sap_secondary_instance)) && length(var.sap_secondary_instance) <= 13 + error_message = "The sap_secondary_instance must consist of lowercase letters (a-z), numbers, and hyphens and be less than 14 characters long." + } +} +variable "sap_secondary_zone" { + type = string + description = "Zone where the second instance will be created" +} +# +# File system settings +# +variable "nfs_path" { + type = string + description = "NFS path for shared file system, e.g. 10.163.58.114:/ssd" + validation { + condition = var.nfs_path == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.nfs_path)) + error_message = "The nfs_path must be an IP address followed by ':/' then some name." + } +} +# +# SAP system settings +# +variable "sap_sid" { + type = string + description = "SAP System ID" + validation { + condition = length(var.sap_sid) == 3 && can(regex("[A-Z][A-Z0-9]{2}", var.sap_sid)) + error_message = "The sap_sid must be 3 characters long and start with a letter and all letters must be capitalized." 
+  }
+}
+
+##############################################################################
+## OPTIONAL SETTINGS (default values will be determined/calculated)
+##############################################################################
+#
+# General settings
+#
+variable "hc_network_tag" {
+  type        = list(string)
+  default     = []
+  description = "Network tag for the health check firewall rule"
+}
+variable "hc_firewall_rule_name" {
+  type        = string
+  default     = ""
+  description = "Name of firewall rule for the health check"
+}
+#
+# SCS settings
+#
+variable "scs_inst_group_name" {
+  type        = string
+  default     = ""
+  description = "Name of SCS instance group"
+}
+variable "scs_hc_name" {
+  type        = string
+  default     = ""
+  description = "Name of SCS health check"
+}
+variable "scs_hc_port" {
+  type        = string
+  default     = ""
+  description = "Port of SCS health check"
+}
+variable "scs_vip_name" {
+  type        = string
+  default     = ""
+  description = "Name of SCS virtual IP"
+}
+variable "scs_vip_address" {
+  type        = string
+  default     = ""
+  description = "Address of SCS virtual IP"
+}
+variable "scs_backend_svc_name" {
+  type        = string
+  default     = ""
+  description = "Name of SCS backend service"
+}
+variable "scs_forw_rule_name" {
+  type        = string
+  default     = ""
+  description = "Name of SCS forwarding rule"
+}
+#
+# ERS settings
+#
+variable "ers_inst_group_name" {
+  type        = string
+  default     = ""
+  description = "Name of ERS instance group"
+}
+variable "ers_hc_name" {
+  type        = string
+  default     = ""
+  description = "Name of ERS health check"
+}
+variable "ers_hc_port" {
+  type        = string
+  default     = ""
+  description = "Port of ERS health check"
+}
+variable "ers_vip_name" {
+  type        = string
+  default     = ""
+  description = "Name of ERS virtual IP"
+}
+variable "ers_vip_address" {
+  type        = string
+  default     = ""
+  description = "Address of ERS virtual IP"
+}
+variable "ers_backend_svc_name" {
+  type        = string
+  default     = ""
+  description = "Name of ERS backend service"
+}
+variable "ers_forw_rule_name" {
+  type        = string
+  default     = ""
+  description = "Name of ERS forwarding rule"
+}
+#
+# File system settings
+#
+variable "usr_sap_size" {
+  type        = number
+  default     = 8
+  description = "Size of /usr/sap in GB"
+  validation {
+    condition     = var.usr_sap_size >= 8
+    error_message = "Size of /usr/sap must be at least 8 GB."
+  }
+}
+variable "sap_mnt_size" {
+  type        = number
+  default     = 8
+  description = "Size of /sapmnt in GB"
+
+  validation {
+    condition     = var.sap_mnt_size >= 8
+    error_message = "Size of /sapmnt must be at least 8 GB."
+  }
+}
+variable "swap_size" {
+  type        = number
+  default     = 8
+  description = "Size in GB of swap volume"
+
+  validation {
+    condition     = var.swap_size >= 8
+    error_message = "Size of swap must be at least 8 GB."
+  }
+}
+#
+# SAP system settings
+#
+variable "sap_scs_instance_number" {
+  type        = string
+  default     = "00"
+  description = "SCS instance number"
+  validation {
+    condition     = length(var.sap_scs_instance_number) == 2 && tonumber(var.sap_scs_instance_number) >= 0
+    error_message = "The length of sap_scs_instance_number must be 2. If you'd like a single digit (x) then enter it as (0x)."
+  }
+}
+variable "sap_ers_instance_number" {
+  type        = string
+  default     = "10"
+  description = "ERS instance number"
+  validation {
+    condition     = length(var.sap_ers_instance_number) == 2 && tonumber(var.sap_ers_instance_number) >= 0
+    error_message = "The length of sap_ers_instance_number must be 2. If you'd like a single digit (x) then enter it as (0x)."
+  }
+}
+variable "sap_nw_abap" {
+  type        = bool
+  default     = true
+  description = "Whether this is a NetWeaver ABAP installation. Set to 'false' for NW Java. Dual stack is not supported by this script."
+}
+#
+# Pacemaker settings
+#
+variable "pacemaker_cluster_name" {
+  type        = string
+  default     = ""
+  description = "Name of Pacemaker cluster."
+}
+#
+# Optional Settings
+#
+variable "public_ip" {
+  type        = bool
+  default     = false
+  description = "Create an ephemeral public IP for the instances"
+}
+variable "service_account" {
+  type        = string
+  default     = ""
+  description = <<-EOT
+  Service account that will be used as the service account on the created instance.
+  Leave this blank to use the project default service account
+  EOT
+}
+variable "network_tags" {
+  type        = list(string)
+  default     = []
+  description = "Network tags to apply to the instances"
+}
+variable "sap_deployment_debug" {
+  type        = bool
+  default     = false
+  description = "Debug log level for deployment"
+}
+variable "primary_reservation_name" {
+  type        = string
+  description = <<-EOT
+  Use a reservation specified by RESERVATION_NAME.
+  By default ANY_RESERVATION is used when this variable is empty.
+  In order for a reservation to be used it must be created with the
+  "Select specific reservation" selected (specificReservationRequired set to true)
+  Be sure to create your reservation with the correct Min CPU Platform for the
+  following instance types:
+  n1-highmem-32 : Intel Broadwell
+  n1-highmem-64 : Intel Broadwell
+  n1-highmem-96 : Intel Skylake
+  n1-megamem-96 : Intel Skylake
+  m1-megamem-96 : Intel Skylake
+  All other instance types can have automatic Min CPU Platform
+  EOT
+  default     = ""
+}
+
+variable "secondary_reservation_name" {
+  type        = string
+  description = <<-EOT
+  Use a reservation specified by RESERVATION_NAME.
+  By default ANY_RESERVATION is used when this variable is empty.
+  In order for a reservation to be used it must be created with the
+  "Select specific reservation" selected (specificReservationRequired set to true)
+  Be sure to create your reservation with the correct Min CPU Platform for the
+  following instance types:
+  n1-highmem-32 : Intel Broadwell
+  n1-highmem-64 : Intel Broadwell
+  n1-highmem-96 : Intel Skylake
+  n1-megamem-96 : Intel Skylake
+  m1-megamem-96 : Intel Skylake
+  All other instance types can have automatic Min CPU Platform
+  EOT
+  default     = ""
+}
+#
+# DO NOT MODIFY unless you know what you are doing
+#
+variable "primary_startup_url" {
+  type        = string
+  default     = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_nw_ha/startup_scs.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform"
+  description = "DO NOT USE"
+}
+variable "secondary_startup_url" {
+  type        = string
+  default     = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_nw_ha/startup_ers.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform"
+  description = "DO NOT USE"
+}
+variable "post_deployment_script" {
+  type        = string
+  default     = ""
+  description = <<-EOT
+  Specifies the location of a script to run after the deployment is complete.
+  The script should be hosted on a web server or in a GCS bucket. The URL should
+  begin with http:// https:// or gs://. Note that this script will be executed
+  on all VMs that the template creates. If you only want to run it on the master
+  instance you will need to add a check at the top of your script.
+ EOT +} + +# +# DO NOT MODIFY unless you know what you are doing +# +variable "can_ip_forward" { + type = bool + description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed." + default = true +} diff --git a/modules/sap_nw_ha/versions.tf b/modules/sap_nw_ha/versions.tf new file mode 100644 index 00000000..da31d1ff --- /dev/null +++ b/modules/sap_nw_ha/versions.tf @@ -0,0 +1,21 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +terraform { + required_version = ">=0.12.6" + required_providers { + google = {} + } +} \ No newline at end of file From 7971cf7f6538eaed8a2cae959e0a8e60b0f3cafa Mon Sep 17 00:00:00 2001 From: Google SAP Deployments Dev Date: Thu, 11 Apr 2024 18:29:45 +0000 Subject: [PATCH 2/8] Project import generated by Copybara. GitOrigin-RevId: f34e0227e9b1af54531a09ae635dddcccbbb4d0d --- modules/sap_hana/main.tf | 456 +++++++++-------- modules/sap_hana/outputs.tf | 2 +- modules/sap_hana/variables.tf | 134 ++--- modules/sap_hana/versions.tf | 7 +- modules/sap_hana_ha/main.tf | 648 ++++++++++++++----------- modules/sap_hana_ha/outputs.tf | 8 +- modules/sap_hana_ha/variables.tf | 140 +++--- modules/sap_hana_ha/versions.tf | 9 +- modules/sap_hana_scaleout/main.tf | 344 +++++++------ modules/sap_hana_scaleout/outputs.tf | 4 +- modules/sap_hana_scaleout/variables.tf | 74 +-- modules/sap_hana_scaleout/versions.tf | 9 +- modules/sap_nw/main.tf | 4 +- modules/sap_nw/variables.tf | 2 +- modules/sap_nw/versions.tf | 5 +- modules/sap_nw_ha/main.tf | 30 +- modules/sap_nw_ha/outputs.tf | 12 +- modules/sap_nw_ha/variables.tf | 4 +- modules/sap_nw_ha/versions.tf | 5 +- 19 files changed, 1033 insertions(+), 864 deletions(-) diff --git a/modules/sap_hana/main.tf b/modules/sap_hana/main.tf index a68dbadc..5b358bee 100644 --- a/modules/sap_hana/main.tf +++ b/modules/sap_hana/main.tf @@ -16,8 +16,8 @@ # # Terraform SAP HANA for Google Cloud # -# Version: 2.0.202402230649 -# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 +# Version: 2.0.202404101403 +# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 # ################################################################################ @@ -25,82 +25,96 @@ ################################################################################ locals { mem_size_map = { - "n1-highmem-32" = 208 - "n1-highmem-64" = 416 - "n1-highmem-96" = 624 - "n1-megamem-96" = 1433 - "n2-highmem-32" = 256 - "n2-highmem-48" = 384 - "n2-highmem-64" = 512 - "n2-highmem-80" = 640 - "n2-highmem-96" = 768 - "n2-highmem-128" = 864 - "n1-ultramem-40" = 961 - "n1-ultramem-80" = 1922 - "n1-ultramem-160" = 3844 - "m1-megamem-96" = 1433 - "m1-ultramem-40" = 961 - "m1-ultramem-80" = 1922 - "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5888 - "m2-megamem-416" = 5888 - "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11744 - "m3-megamem-64" = 976 - "m3-megamem-128" = 1952 - "m3-ultramem-32" = 976 - "m3-ultramem-64" = 1952 - "m3-ultramem-128" = 3904 - "c3-standard-44" = 176 - 
"c3-highmem-44" = 352 - "c3-highmem-88" = 704 - "c3-highmem-176" = 1408 + "n1-highmem-32" = 208 + "n1-highmem-64" = 416 + "n1-highmem-96" = 624 + "n1-megamem-96" = 1433 + "n2-highmem-32" = 256 + "n2-highmem-48" = 384 + "n2-highmem-64" = 512 + "n2-highmem-80" = 640 + "n2-highmem-96" = 768 + "n2-highmem-128" = 864 + "n1-ultramem-40" = 961 + "n1-ultramem-80" = 1922 + "n1-ultramem-160" = 3844 + "m1-megamem-96" = 1433 + "m1-ultramem-40" = 961 + "m1-ultramem-80" = 1922 + "m1-ultramem-160" = 3844 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 + "m2-hypermem-416" = 8832 + "m2-ultramem-416" = 11744 + "m3-megamem-64" = 976 + "m3-megamem-128" = 1952 + "m3-ultramem-32" = 976 + "m3-ultramem-64" = 1952 + "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 + "c3-standard-192-metal" = 768 + "c3-highcpu-192-metal" = 512 + "c3-highmem-192-metal" = 1536 + "x4-megamem-960-metal" = 16384 + "x4-megamem-1440-metal" = 24576 + "x4-megamem-1920-metal" = 32768 } cpu_platform_map = { - "n1-highmem-32" = "Intel Broadwell" - "n1-highmem-64" = "Intel Broadwell" - "n1-highmem-96" = "Intel Skylake" - "n1-megamem-96" = "Intel Skylake" - "n2-highmem-32" = "Automatic" - "n2-highmem-48" = "Automatic" - "n2-highmem-64" = "Automatic" - "n2-highmem-80" = "Automatic" - "n2-highmem-96" = "Automatic" - "n2-highmem-128" = "Automatic" - "n1-ultramem-40" = "Automatic" - "n1-ultramem-80" = "Automatic" - "n1-ultramem-160" = "Automatic" - "m1-megamem-96" = "Intel Skylake" - "m1-ultramem-40" = "Automatic" - "m1-ultramem-80" = "Automatic" - "m1-ultramem-160" = "Automatic" - "m2-ultramem-208" = "Automatic" - "m2-megamem-416" = "Automatic" - "m2-hypermem-416" = "Automatic" - "m2-ultramem-416" = "Automatic" - "m3-megamem-64" = "Automatic" - "m3-megamem-128" = "Automatic" - "m3-ultramem-32" = "Automatic" - "m3-ultramem-64" = "Automatic" - "m3-ultramem-128" = "Automatic" - "c3-standard-44" = "Automatic" - "c3-highmem-44" = "Automatic" - "c3-highmem-88" = "Automatic" - "c3-highmem-176" = "Automatic" - } - + "n1-highmem-32" = "Intel Broadwell" + "n1-highmem-64" = "Intel Broadwell" + "n1-highmem-96" = "Intel Skylake" + "n1-megamem-96" = "Intel Skylake" + "n2-highmem-32" = "Automatic" + "n2-highmem-48" = "Automatic" + "n2-highmem-64" = "Automatic" + "n2-highmem-80" = "Automatic" + "n2-highmem-96" = "Automatic" + "n2-highmem-128" = "Automatic" + "n1-ultramem-40" = "Automatic" + "n1-ultramem-80" = "Automatic" + "n1-ultramem-160" = "Automatic" + "m1-megamem-96" = "Intel Skylake" + "m1-ultramem-40" = "Automatic" + "m1-ultramem-80" = "Automatic" + "m1-ultramem-160" = "Automatic" + "m2-ultramem-208" = "Automatic" + "m2-megamem-416" = "Automatic" + "m2-hypermem-416" = "Automatic" + "m2-ultramem-416" = "Automatic" + "m3-megamem-64" = "Automatic" + "m3-megamem-128" = "Automatic" + "m3-ultramem-32" = "Automatic" + "m3-ultramem-64" = "Automatic" + "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + "c3-standard-192-metal" = "Automatic" + "c3-highcpu-192-metal" = "Automatic" + "c3-highmem-192-metal" = "Automatic" + "x4-megamem-960-metal" = "Automatic" + "x4-megamem-1440-metal" = "Automatic" + "x4-megamem-1920-metal" = "Automatic" + } + + native_bm = length(regexall("metal", var.machine_type)) > 0 # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. # All 'over provisioned' capacity is to go onto the data disk. 
+ final_disk_type = var.disk_type == "" ? (local.native_bm ? "hyperdisk-extreme" : "pd-ssd") : var.disk_type min_total_disk_map = { - "pd-ssd" = 550 - "pd-balanced" = 943 - "pd-extreme" = 0 + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 "hyperdisk-balanced" = 0 - "hyperdisk-extreme" = 0 + "hyperdisk-extreme" = 0 } - min_total_disk = local.min_total_disk_map[var.disk_type] + min_total_disk = local.min_total_disk_map[local.final_disk_type] mem_size = lookup(local.mem_size_map, var.machine_type, 320) hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) @@ -108,11 +122,11 @@ locals { hana_shared_size_min = min(1024, local.mem_size) hana_usrsap_size = 32 - hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_usrsap_size - local.hana_log_size - local.hana_shared_size_min ) + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_usrsap_size - local.hana_log_size - local.hana_shared_size_min) # scaleout_nodes > 0 then hana_shared_size and backup is changed; assumes that sap_hana_scaleout_nodes is an integer - hana_shared_size = var.sap_hana_scaleout_nodes > 0 ? local.hana_shared_size_min * ceil(var.sap_hana_scaleout_nodes / 4): local.hana_shared_size_min - backup_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size * (var.sap_hana_scaleout_nodes + 1) + hana_shared_size = var.sap_hana_scaleout_nodes > 0 ? local.hana_shared_size_min * ceil(var.sap_hana_scaleout_nodes / 4) : local.hana_shared_size_min + backup_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size * (var.sap_hana_scaleout_nodes + 1) # ensure the combined disk meets minimum size/performance ; pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_shared_size + local.hana_usrsap_size + 1)) @@ -120,100 +134,100 @@ locals { # ensure pd-hdd for backup is smaller than the maximum pd size pd_size_worker = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_usrsap_size + 1)) - final_data_disk_type = var.data_disk_type_override == "" ? var.disk_type : var.data_disk_type_override - final_log_disk_type = var.log_disk_type_override == "" ? var.disk_type : var.log_disk_type_override - - temp_shared_disk_type = contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], var.disk_type) ? "pd-balanced" : var.disk_type - temp_usrsap_disk_type = contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], var.disk_type) ? "pd-balanced" : var.disk_type - + unified_pd_size = var.unified_disk_size_override == null ? local.pd_size : var.unified_disk_size_override + unified_worker_pd_size = var.unified_worker_disk_size_override == null ? local.pd_size_worker : var.unified_worker_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override + shared_pd_size = var.shared_disk_size_override == null ? local.hana_shared_size : var.shared_disk_size_override + usrsap_pd_size = var.usrsap_disk_size_override == null ? local.hana_usrsap_size : var.usrsap_disk_size_override + + # Disk types + final_data_disk_type = var.data_disk_type_override == "" ? local.final_disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? local.final_disk_type : var.log_disk_type_override + temp_shared_disk_type = local.native_bm ? 
"hyperdisk-balanced" : (contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], local.final_disk_type) ? "pd-balanced" : local.final_disk_type) + temp_usrsap_disk_type = local.native_bm ? "hyperdisk-balanced" : (contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], local.final_disk_type) ? "pd-balanced" : local.final_disk_type) final_shared_disk_type = var.shared_disk_type_override == "" ? local.temp_shared_disk_type : var.shared_disk_type_override final_usrsap_disk_type = var.usrsap_disk_type_override == "" ? local.temp_usrsap_disk_type : var.usrsap_disk_type_override + final_backup_disk_type = var.backup_disk_type == "" ? (local.native_bm ? "hyperdisk-balanced" : "pd-balanced") : var.backup_disk_type - unified_pd_size = var.unified_disk_size_override == null ? local.pd_size : var.unified_disk_size_override - unified_worker_pd_size = var.unified_worker_disk_size_override == null ? local.pd_size_worker : var.unified_worker_disk_size_override - data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override - log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override - shared_pd_size = var.shared_disk_size_override == null ? local.hana_shared_size : var.shared_disk_size_override - usrsap_pd_size = var.usrsap_disk_size_override == null ? local.hana_usrsap_size : var.usrsap_disk_size_override - - # IOPS + # Disk IOPS hdx_iops_map = { - "data" = max(10000, local.data_pd_size*2) - "log" = max(10000, local.log_pd_size*2) - "shared" = null - "usrsap" = null - "unified" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) - "worker" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) - "backup" = max(10000, 2 * local.backup_size) + "data" = max(10000, local.data_pd_size * 2) + "log" = max(10000, local.log_pd_size * 2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "worker" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "backup" = max(10000, 2 * local.backup_size) } hdb_iops_map = { - "data" = var.hyperdisk_balanced_iops_default - "log" = var.hyperdisk_balanced_iops_default - "shared" = null - "usrsap" = null + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null "unified" = var.hyperdisk_balanced_iops_default - "worker" = var.hyperdisk_balanced_iops_default - "backup" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default + "backup" = var.hyperdisk_balanced_iops_default } null_iops_map = { - "data" = null - "log" = null - "shared" = null - "usrsap" = null + "data" = null + "log" = null + "shared" = null + "usrsap" = null "unified" = null - "worker" = null - "backup" = null + "worker" = null + "backup" = null } iops_map = { - "pd-ssd" = local.null_iops_map - "pd-balanced" = local.null_iops_map - "pd-extreme" = local.hdx_iops_map + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map "hyperdisk-balanced" = local.hdb_iops_map - "hyperdisk-extreme" = local.hdx_iops_map + "hyperdisk-extreme" = local.hdx_iops_map } - final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override - final_log_iops = var.log_disk_iops_override == null ? 
local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override - final_shared_iops = var.shared_disk_iops_override == null ? local.iops_map[local.final_shared_disk_type]["shared"] : var.shared_disk_iops_override - final_usrsap_iops = var.usrsap_disk_iops_override == null ? local.iops_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_iops_override - final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[var.disk_type]["unified"] : var.unified_disk_iops_override - final_unified_worker_iops = var.unified_worker_disk_iops_override == null ? local.iops_map[var.disk_type]["worker"] : var.unified_worker_disk_iops_override - final_backup_iops = var.backup_disk_iops_override == null ? local.iops_map[var.backup_disk_type]["backup"] : var.backup_disk_iops_override + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_shared_iops = var.shared_disk_iops_override == null ? local.iops_map[local.final_shared_disk_type]["shared"] : var.shared_disk_iops_override + final_usrsap_iops = var.usrsap_disk_iops_override == null ? local.iops_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[local.final_disk_type]["unified"] : var.unified_disk_iops_override + final_unified_worker_iops = var.unified_worker_disk_iops_override == null ? local.iops_map[local.final_disk_type]["worker"] : var.unified_worker_disk_iops_override + final_backup_iops = var.backup_disk_iops_override == null ? local.iops_map[local.final_backup_disk_type]["backup"] : var.backup_disk_iops_override - # THROUGHPUT + # Disk throughput MB/s hdb_throughput_map = { - "data" = var.hyperdisk_balanced_throughput_default - "log" = var.hyperdisk_balanced_throughput_default - "shared" = null - "usrsap" = null + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default + "shared" = null + "usrsap" = null "unified" = var.hyperdisk_balanced_throughput_default - "worker" = var.hyperdisk_balanced_throughput_default - "backup" = var.hyperdisk_balanced_throughput_default + "worker" = var.hyperdisk_balanced_throughput_default + "backup" = var.hyperdisk_balanced_throughput_default } null_throughput_map = { - "data" = null - "log" = null - "shared" = null - "usrsap" = null + "data" = null + "log" = null + "shared" = null + "usrsap" = null "unified" = null - "worker" = null - "backup" = null + "worker" = null + "backup" = null } throughput_map = { - "pd-ssd" = local.null_throughput_map - "pd-balanced" = local.null_throughput_map - "pd-extreme" = local.null_throughput_map + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map "hyperdisk-balanced" = local.hdb_throughput_map - "hyperdisk-extreme" = local.null_throughput_map + "hyperdisk-extreme" = local.null_throughput_map } - final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override - final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override - final_shared_throughput = var.shared_disk_throughput_override == null ? 
local.throughput_map[local.final_shared_disk_type]["shared"] : var.shared_disk_throughput_override - final_usrsap_throughput = var.usrsap_disk_throughput_override == null ? local.throughput_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_throughput_override - final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[var.disk_type]["unified"] : var.unified_disk_throughput_override - final_unified_worker_throughput = var.unified_worker_disk_throughput_override == null ? local.throughput_map[var.disk_type]["worker"] : var.unified_worker_disk_throughput_override - final_backup_throughput = var.backup_disk_throughput_override == null ? local.throughput_map[var.backup_disk_type]["backup"] : var.backup_disk_throughput_override + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_shared_throughput = var.shared_disk_throughput_override == null ? local.throughput_map[local.final_shared_disk_type]["shared"] : var.shared_disk_throughput_override + final_usrsap_throughput = var.usrsap_disk_throughput_override == null ? local.throughput_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[local.final_disk_type]["unified"] : var.unified_disk_throughput_override + final_unified_worker_throughput = var.unified_worker_disk_throughput_override == null ? local.throughput_map[local.final_disk_type]["worker"] : var.unified_worker_disk_throughput_override + final_backup_throughput = var.backup_disk_throughput_override == null ? local.throughput_map[local.final_backup_disk_type]["backup"] : var.backup_disk_throughput_override # network config variables zone_split = split("-", var.zone) @@ -225,7 +239,7 @@ locals { primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url - has_shared_nfs = !( var.sap_hana_shared_nfs == "" && var.sap_hana_shared_nfs_resource == null) + has_shared_nfs = !(var.sap_hana_shared_nfs == "" && var.sap_hana_shared_nfs_resource == null) make_shared_disk = !var.use_single_shared_data_log_disk && !local.has_shared_nfs use_backup_disk = (var.include_backup_disk && var.sap_hana_backup_nfs == "" && var.sap_hana_backup_nfs_resource == null) @@ -238,18 +252,31 @@ locals { } +# tflint-ignore: terraform_unused_declarations data "assert_test" "one_backup" { - test = local.use_backup_disk || !local.both_backup_nfs_defined + test = local.use_backup_disk || !local.both_backup_nfs_defined throw = "Either use a disk for /backup (include_backup_disk) or use NFS. If using an NFS as /backup then only either sap_hana_backup_nfs or sap_hana_backup_nfs_resource may be defined." } +# tflint-ignore: terraform_unused_declarations data "assert_test" "one_shared" { - test = !local.both_shared_nfs_defined + test = !local.both_shared_nfs_defined throw = "If using an NFS as /shared then only either sap_hana_shared_nfs or sap_hana_shared_nfs_resource may be defined." 
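+  # Illustrative failure case: defining both sap_hana_shared_nfs and
+  # sap_hana_shared_nfs_resource makes this test false, and deployment fails
+  # with the message above.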
} +# tflint-ignore: terraform_unused_declarations data "assert_test" "both_or_neither_nfs" { - test = (local.backup_nfs_endpoint == "") == (local.shared_nfs_endpoint == "") + test = (local.backup_nfs_endpoint == "") == (local.shared_nfs_endpoint == "") throw = "If either NFS is defined, then both /shared and /backup must be defined." } +# tflint-ignore: terraform_unused_declarations +data "assert_test" "hyperdisk_with_native_bm" { + test = local.native_bm ? length(regexall("hyperdisk", local.final_disk_type)) > 0 : true + throw = "Native bare metal machines only work with hyperdisks. Set 'disk_type' accordingly, e.g. 'disk_type = hyperdisk-balanced'" +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "backup_hyperdisk_with_native_bm" { + test = local.native_bm && local.use_backup_disk ? (length(regexall("hyperdisk", local.final_backup_disk_type)) > 0) : true + throw = "Native bare metal machines only work with hyperdisks. Set 'backup_disk_type' accordingly, e.g. 'backup_disk_type = hyperdisk-balanced'" +} ################################################################################ # disks @@ -258,7 +285,7 @@ data "assert_test" "both_or_neither_nfs" { resource "google_compute_disk" "sap_hana_boot_disks" { count = var.sap_hana_scaleout_nodes + 1 name = format("${var.instance_name}-boot%05d", count.index + 1) - type = "pd-balanced" + type = local.native_bm ? "hyperdisk-balanced" : "pd-balanced" zone = var.zone size = 30 # GB project = var.project_id @@ -273,78 +300,78 @@ resource "google_compute_disk" "sap_hana_boot_disks" { } resource "google_compute_disk" "sap_hana_unified_disks" { - count = var.use_single_shared_data_log_disk ? 1 : 0 - name = format("${var.instance_name}-hana") - type = var.disk_type - zone = var.zone - size = local.unified_pd_size - project = var.project_id - provisioned_iops = local.final_unified_iops + count = var.use_single_shared_data_log_disk ? 1 : 0 + name = format("${var.instance_name}-hana") + type = local.final_disk_type + zone = var.zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops provisioned_throughput = local.final_unified_throughput } resource "google_compute_disk" "sap_hana_unified_worker_disks" { - count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes : 0 - name = format("${var.instance_name}-hana%05d", count.index + 1) - type = var.disk_type - zone = var.zone - size = local.unified_worker_pd_size - project = var.project_id - provisioned_iops = local.final_unified_worker_iops - provisioned_throughput = local.final_unified_throughput + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes : 0 + name = format("${var.instance_name}-hana%05d", count.index + 1) + type = local.final_disk_type + zone = var.zone + size = local.unified_worker_pd_size + project = var.project_id + provisioned_iops = local.final_unified_worker_iops + provisioned_throughput = local.final_unified_worker_throughput } # Split data/log/sap disks resource "google_compute_disk" "sap_hana_data_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = format("${var.instance_name}-data%05d", count.index + 1) - type = local.final_data_disk_type - zone = var.zone - size = local.data_pd_size - project = var.project_id - provisioned_iops = local.final_data_iops + count = var.use_single_shared_data_log_disk ? 
0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-data%05d", count.index + 1) + type = local.final_data_disk_type + zone = var.zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops provisioned_throughput = local.final_data_throughput } resource "google_compute_disk" "sap_hana_log_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = format("${var.instance_name}-log%05d", count.index + 1) - type = local.final_log_disk_type - zone = var.zone - size = local.log_pd_size - project = var.project_id - provisioned_iops = local.final_log_iops + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-log%05d", count.index + 1) + type = local.final_log_disk_type + zone = var.zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops provisioned_throughput = local.final_log_throughput } resource "google_compute_disk" "sap_hana_shared_disk" { - count = local.make_shared_disk ? 1 : 0 - name = format("${var.instance_name}-shared%05d", count.index + 1) - type = local.final_shared_disk_type - zone = var.zone - size = local.shared_pd_size - project = var.project_id - provisioned_iops = local.final_shared_iops + count = local.make_shared_disk ? 1 : 0 + name = format("${var.instance_name}-shared%05d", count.index + 1) + type = local.final_shared_disk_type + zone = var.zone + size = local.shared_pd_size + project = var.project_id + provisioned_iops = local.final_shared_iops provisioned_throughput = local.final_shared_throughput } resource "google_compute_disk" "sap_hana_usrsap_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = format("${var.instance_name}-usrsap%05d", count.index + 1) - type = local.final_usrsap_disk_type - zone = var.zone - size = local.usrsap_pd_size - project = var.project_id - provisioned_iops = local.final_usrsap_iops + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-usrsap%05d", count.index + 1) + type = local.final_usrsap_disk_type + zone = var.zone + size = local.usrsap_pd_size + project = var.project_id + provisioned_iops = local.final_usrsap_iops provisioned_throughput = local.final_usrsap_throughput } resource "google_compute_disk" "sap_hana_backup_disk" { - count = local.use_backup_disk ? 1 : 0 - name = "${var.instance_name}-backup" - type = var.backup_disk_type - zone = var.zone - size = local.backup_size - project = var.project_id - provisioned_iops = local.final_backup_iops + count = local.use_backup_disk ? 1 : 0 + name = "${var.instance_name}-backup" + type = local.final_backup_disk_type + zone = var.zone + size = local.backup_size + project = var.project_id + provisioned_iops = local.final_backup_iops provisioned_throughput = local.final_backup_throughput } @@ -387,8 +414,14 @@ resource "google_compute_instance" "sap_hana_primary_instance" { source = google_compute_disk.sap_hana_boot_disks[0].self_link } + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? 
[1] : [] content { device_name = google_compute_disk.sap_hana_unified_disks[0].name @@ -396,35 +429,35 @@ resource "google_compute_instance" "sap_hana_primary_instance" { } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_data_disks[0].name source = google_compute_disk.sap_hana_data_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_log_disks[0].name source = google_compute_disk.sap_hana_log_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = length(google_compute_disk.sap_hana_shared_disk) > 0 ? [1] : [] content { device_name = google_compute_disk.sap_hana_shared_disk[0].name source = google_compute_disk.sap_hana_shared_disk[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_usrsap_disks[0].name source = google_compute_disk.sap_hana_usrsap_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = length(google_compute_disk.sap_hana_backup_disk) > 0 ? [1] : [] content { device_name = google_compute_disk.sap_hana_backup_disk[0].name @@ -437,7 +470,7 @@ resource "google_compute_instance" "sap_hana_primary_instance" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_vm_ip.address - nic_type = var.nic_type == "" ? null : var.nic_type + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -490,6 +523,7 @@ resource "google_compute_instance" "sap_hana_primary_instance" { sap_hana_shared_disk = local.make_shared_disk sap_hana_data_disk_type = local.final_data_disk_type enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" } @@ -514,7 +548,14 @@ resource "google_compute_instance" "sap_hana_worker_instances" { source = google_compute_disk.sap_hana_boot_disks[count.index + 1].self_link } - dynamic attached_disk { + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [1] : [] content { device_name = google_compute_disk.sap_hana_unified_worker_disks[count.index].name @@ -522,21 +563,21 @@ resource "google_compute_instance" "sap_hana_worker_instances" { } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_data_disks[count.index + 1].name source = google_compute_disk.sap_hana_data_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_log_disks[count.index + 1].name source = google_compute_disk.sap_hana_log_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? 
[] : [1] content { device_name = google_compute_disk.sap_hana_usrsap_disks[count.index + 1].name @@ -549,7 +590,7 @@ resource "google_compute_instance" "sap_hana_worker_instances" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_worker_ip[count.index].address - nic_type = var.nic_type == "" ? null : var.nic_type + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -600,6 +641,7 @@ resource "google_compute_instance" "sap_hana_worker_instances" { sap_hana_backup_disk = false sap_hana_shared_disk = false enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" } diff --git a/modules/sap_hana/outputs.tf b/modules/sap_hana/outputs.tf index 491afae6..a19bf449 100644 --- a/modules/sap_hana/outputs.tf +++ b/modules/sap_hana/outputs.tf @@ -20,5 +20,5 @@ output "sap_hana_primary_self_link" { output "sap_hana_worker_self_links" { description = "SAP HANA self-links for the secondary instances created" - value = google_compute_instance.sap_hana_worker_instances.*.self_link + value = google_compute_instance.sap_hana_worker_instances[*].self_link } diff --git a/modules/sap_hana/variables.tf b/modules/sap_hana/variables.tf index dff8c6ce..2c8ee7fe 100644 --- a/modules/sap_hana/variables.tf +++ b/modules/sap_hana/variables.tf @@ -56,10 +56,10 @@ variable "sap_hana_deployment_bucket" { type = string description = "The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." validation { - condition = (! (length(regexall( "gs:", var.sap_hana_deployment_bucket)) > 0)) + condition = (!(length(regexall("gs:", var.sap_hana_deployment_bucket)) > 0)) error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://." } - default = "" + default = "" } variable "sap_hana_sid" { @@ -166,7 +166,7 @@ variable "sap_hana_shared_nfs_resource" { } variable "sap_hana_shared_nfs" { - type = string + type = string default = "" validation { condition = var.sap_hana_shared_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_shared_nfs)) @@ -176,7 +176,7 @@ variable "sap_hana_shared_nfs" { } variable "sap_hana_backup_nfs" { - type = string + type = string default = "" validation { condition = var.sap_hana_backup_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_backup_nfs)) @@ -263,23 +263,23 @@ variable "nic_type" { condition = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type) error_message = "The nic_type must be either GVNIC or VIRTIO_NET." } - default = "" + default = "" } variable "disk_type" { - type = string - description = "Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details." + type = string + description = "Optional - The default disk type to use for disk(s) containing log and data volumes. The default is hyperdisk-extreme for native bare metal machines and pd-ssd otherwise. 
Not all disk are supported on all machine types - see https://cloud.google.com/compute/docs/disks/ for details." validation { - condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) - error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + condition = contains(["", "pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) + error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." } - default = "pd-ssd" + default = "" } variable "use_single_shared_data_log_disk" { - type = bool + type = bool description = "Optional - By default three separate disk for data, logs, and shared will be made. If set to true, one disk will be used instead." - default = false + default = false } variable "include_backup_disk" { @@ -290,12 +290,12 @@ variable "include_backup_disk" { variable "backup_disk_type" { type = string - description = "Optional - The default is pd-balanced, only used if a backup disk is needed." - default = "pd-balanced" + description = "Optional - The default is hyperdisk-balanced for native bare metal machines and pd-balanced otherwise, only used if a backup disk is needed." validation { - condition = contains(["pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.backup_disk_type) + condition = contains(["", "pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.backup_disk_type) error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-standard, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." } + default = "" } variable "vm_static_ip" { @@ -305,7 +305,7 @@ variable "vm_static_ip" { condition = var.vm_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.vm_static_ip)) error_message = "The vm_static_ip must be a valid IP address." } - default = "" + default = "" } variable "worker_static_ips" { @@ -317,7 +317,7 @@ variable "worker_static_ips" { ]) error_message = "All worker_static_ips must be valid IP addresses." } - default = [] + default = [] } variable "enable_fast_restart" { @@ -327,15 +327,15 @@ variable "enable_fast_restart" { } variable "hyperdisk_balanced_iops_default" { - type = number + type = number description = "Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." - default = 3000 + default = 3000 } variable "hyperdisk_balanced_throughput_default" { - type = number + type = number description = "Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." - default = 750 + default = 750 } # @@ -343,7 +343,7 @@ variable "hyperdisk_balanced_throughput_default" { # variable "data_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." 
validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) @@ -352,7 +352,7 @@ variable "data_disk_type_override" { default = "" } variable "log_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) @@ -361,7 +361,7 @@ variable "log_disk_type_override" { default = "" } variable "shared_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the shared disk." validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.shared_disk_type_override) @@ -370,7 +370,7 @@ variable "shared_disk_type_override" { default = "" } variable "usrsap_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the /usr/sap disk." validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.usrsap_disk_type_override) @@ -380,115 +380,115 @@ variable "usrsap_disk_type_override" { } variable "unified_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary's unified disk, that is based off of the machine_type." - default = null + default = null } variable "unified_worker_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the unified worker disk(s), that is based off of the machine_type." - default = null + default = null } variable "data_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." - default = null + default = null } variable "log_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine_type." - default = null + default = null } variable "shared_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine_type." - default = null + default = null } variable "usrsap_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine_type." 
- default = null + default = null } variable "unified_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "unified_worker_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "data_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "log_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "shared_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "usrsap_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "backup_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "unified_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "unified_worker_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "data_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." 
- default = null + default = null } variable "log_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "shared_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "usrsap_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "backup_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } diff --git a/modules/sap_hana/versions.tf b/modules/sap_hana/versions.tf index 27705ac0..bd9c34c5 100644 --- a/modules/sap_hana/versions.tf +++ b/modules/sap_hana/versions.tf @@ -16,9 +16,12 @@ terraform { required_version = ">=0.12.6" required_providers { - google = {} + google = { + source = "hashicorp/google" + version = ">= 4.0.0, < 6" + } assert = { - source = "bwoznicki/assert" + source = "bwoznicki/assert" version = "0.0.1" } } diff --git a/modules/sap_hana_ha/main.tf b/modules/sap_hana_ha/main.tf index 40ac897b..0f791809 100644 --- a/modules/sap_hana_ha/main.tf +++ b/modules/sap_hana_ha/main.tf @@ -16,8 +16,8 @@ # # Terraform SAP HANA HA for Google Cloud # -# Version: 2.0.202402230649 -# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 +# Version: 2.0.202404101403 +# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 # ################################################################################ @@ -25,90 +25,105 @@ 
################################################################################ locals { mem_size_map = { - "n1-highmem-32" = 208 - "n1-highmem-64" = 416 - "n1-highmem-96" = 624 - "n1-megamem-96" = 1433 - "n2-highmem-32" = 256 - "n2-highmem-48" = 384 - "n2-highmem-64" = 512 - "n2-highmem-80" = 640 - "n2-highmem-96" = 768 - "n2-highmem-128" = 864 - "n1-ultramem-40" = 961 - "n1-ultramem-80" = 1922 - "n1-ultramem-160" = 3844 - "m1-megamem-96" = 1433 - "m1-ultramem-40" = 961 - "m1-ultramem-80" = 1922 - "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5888 - "m2-megamem-416" = 5888 - "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11744 - "m3-megamem-64" = 976 - "m3-megamem-128" = 1952 - "m3-ultramem-32" = 976 - "m3-ultramem-64" = 1952 - "m3-ultramem-128" = 3904 - "c3-standard-44" = 176 - "c3-highmem-44" = 352 - "c3-highmem-88" = 704 - "c3-highmem-176" = 1408 + "n1-highmem-32" = 208 + "n1-highmem-64" = 416 + "n1-highmem-96" = 624 + "n1-megamem-96" = 1433 + "n2-highmem-32" = 256 + "n2-highmem-48" = 384 + "n2-highmem-64" = 512 + "n2-highmem-80" = 640 + "n2-highmem-96" = 768 + "n2-highmem-128" = 864 + "n1-ultramem-40" = 961 + "n1-ultramem-80" = 1922 + "n1-ultramem-160" = 3844 + "m1-megamem-96" = 1433 + "m1-ultramem-40" = 961 + "m1-ultramem-80" = 1922 + "m1-ultramem-160" = 3844 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 + "m2-hypermem-416" = 8832 + "m2-ultramem-416" = 11744 + "m3-megamem-64" = 976 + "m3-megamem-128" = 1952 + "m3-ultramem-32" = 976 + "m3-ultramem-64" = 1952 + "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 + "c3-standard-192-metal" = 768 + "c3-highcpu-192-metal" = 512 + "c3-highmem-192-metal" = 1536 + "x4-megamem-960-metal" = 16384 + "x4-megamem-1440-metal" = 24576 + "x4-megamem-1920-metal" = 32768 } cpu_platform_map = { - "n1-highmem-32" = "Intel Broadwell" - "n1-highmem-64" = "Intel Broadwell" - "n1-highmem-96" = "Intel Skylake" - "n1-megamem-96" = "Intel Skylake" - "n2-highmem-32" = "Automatic" - "n2-highmem-48" = "Automatic" - "n2-highmem-64" = "Automatic" - "n2-highmem-80" = "Automatic" - "n2-highmem-96" = "Automatic" - "n2-highmem-128" = "Automatic" - "n1-ultramem-40" = "Automatic" - "n1-ultramem-80" = "Automatic" - "n1-ultramem-160" = "Automatic" - "m1-megamem-96" = "Intel Skylake" - "m1-ultramem-40" = "Automatic" - "m1-ultramem-80" = "Automatic" - "m1-ultramem-160" = "Automatic" - "m2-ultramem-208" = "Automatic" - "m2-megamem-416" = "Automatic" - "m2-hypermem-416" = "Automatic" - "m2-ultramem-416" = "Automatic" - "m3-megamem-64" = "Automatic" - "m3-megamem-128" = "Automatic" - "m3-ultramem-32" = "Automatic" - "m3-ultramem-64" = "Automatic" - "m3-ultramem-128" = "Automatic" - "c3-standard-44" = "Automatic" - "c3-highmem-44" = "Automatic" - "c3-highmem-88" = "Automatic" - "c3-highmem-176" = "Automatic" + "n1-highmem-32" = "Intel Broadwell" + "n1-highmem-64" = "Intel Broadwell" + "n1-highmem-96" = "Intel Skylake" + "n1-megamem-96" = "Intel Skylake" + "n2-highmem-32" = "Automatic" + "n2-highmem-48" = "Automatic" + "n2-highmem-64" = "Automatic" + "n2-highmem-80" = "Automatic" + "n2-highmem-96" = "Automatic" + "n2-highmem-128" = "Automatic" + "n1-ultramem-40" = "Automatic" + "n1-ultramem-80" = "Automatic" + "n1-ultramem-160" = "Automatic" + "m1-megamem-96" = "Intel Skylake" + "m1-ultramem-40" = "Automatic" + "m1-ultramem-80" = "Automatic" + "m1-ultramem-160" = "Automatic" + "m2-ultramem-208" = "Automatic" + "m2-megamem-416" = "Automatic" + "m2-hypermem-416" = "Automatic" + "m2-ultramem-416" = 
"Automatic" + "m3-megamem-64" = "Automatic" + "m3-megamem-128" = "Automatic" + "m3-ultramem-32" = "Automatic" + "m3-ultramem-64" = "Automatic" + "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + "c3-standard-192-metal" = "Automatic" + "c3-highcpu-192-metal" = "Automatic" + "c3-highmem-192-metal" = "Automatic" + "x4-megamem-960-metal" = "Automatic" + "x4-megamem-1440-metal" = "Automatic" + "x4-megamem-1920-metal" = "Automatic" } + native_bm = length(regexall("metal", var.machine_type)) > 0 + # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. # All 'over provisioned' capacity is to go onto the data disk. + final_disk_type = var.disk_type == "" ? (local.native_bm ? "hyperdisk-extreme" : "pd-ssd") : var.disk_type min_total_disk_map = { - "pd-ssd" = 550 - "pd-balanced" = 943 - "pd-extreme" = 0 + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 "hyperdisk-balanced" = 0 - "hyperdisk-extreme" = 0 + "hyperdisk-extreme" = 0 } - min_total_disk = local.min_total_disk_map[var.disk_type] + min_total_disk = local.min_total_disk_map[local.final_disk_type] - mem_size = lookup(local.mem_size_map, var.machine_type, 320) - hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) - hana_data_size_min = ceil(local.mem_size * 12 / 10) - hana_shared_size = min(1024, local.mem_size) - hana_usrsap_size = 32 + mem_size = lookup(local.mem_size_map, var.machine_type, 320) + hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) + hana_data_size_min = ceil(local.mem_size * 12 / 10) + hana_shared_size = min(1024, local.mem_size) + hana_usrsap_size = 32 default_boot_size = 30 - hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_usrsap_size - local.hana_log_size - local.hana_shared_size ) + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_usrsap_size - local.hana_log_size - local.hana_shared_size) all_network_tag_items = concat(var.network_tags, ["sap-${local.healthcheck_name}-port"]) network_tags = local.all_network_tag_items @@ -117,98 +132,98 @@ locals { # ensure the combined disk meets minimum size/performance pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_shared_size + local.hana_usrsap_size + 1)) - temp_shared_disk_type = contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], var.disk_type) ? "pd-balanced" : var.disk_type - temp_usrsap_disk_type = contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], var.disk_type) ? "pd-balanced" : var.disk_type - - final_data_disk_type = var.data_disk_type_override == "" ? var.disk_type : var.data_disk_type_override - final_log_disk_type = var.log_disk_type_override == "" ? var.disk_type : var.log_disk_type_override + unified_pd_size = var.unified_disk_size_override == null ? local.pd_size : var.unified_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override + shared_pd_size = var.shared_disk_size_override == null ? local.hana_shared_size : var.shared_disk_size_override + usrsap_pd_size = var.usrsap_disk_size_override == null ? local.hana_usrsap_size : var.usrsap_disk_size_override + backup_pd_size = var.sap_hana_backup_size > 0 ? 
var.sap_hana_backup_size : 2 * local.mem_size + + # Disk types + final_data_disk_type = var.data_disk_type_override == "" ? local.final_disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? local.final_disk_type : var.log_disk_type_override + temp_shared_disk_type = local.native_bm ? "hyperdisk-balanced" : (contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], local.final_disk_type) ? "pd-balanced" : local.final_disk_type) + temp_usrsap_disk_type = local.native_bm ? "hyperdisk-balanced" : (contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], local.final_disk_type) ? "pd-balanced" : local.final_disk_type) final_shared_disk_type = var.shared_disk_type_override == "" ? local.temp_shared_disk_type : var.shared_disk_type_override final_usrsap_disk_type = var.usrsap_disk_type_override == "" ? local.temp_usrsap_disk_type : var.usrsap_disk_type_override + final_backup_disk_type = var.backup_disk_type == "" ? (local.native_bm ? "hyperdisk-balanced" : "pd-balanced") : var.backup_disk_type - - unified_pd_size = var.unified_disk_size_override == null ? local.pd_size : var.unified_disk_size_override - data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override - log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override - shared_pd_size = var.shared_disk_size_override == null ? local.hana_shared_size : var.shared_disk_size_override - usrsap_pd_size = var.usrsap_disk_size_override == null ? local.hana_usrsap_size : var.usrsap_disk_size_override - backup_pd_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size - - # IOPS + # Disk IOPS hdx_iops_map = { - "data" = max(10000, local.data_pd_size*2) - "log" = max(10000, local.log_pd_size*2) - "shared" = null - "usrsap" = null - "unified" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) - "worker" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) - "backup" = max(10000, 2 * local.backup_pd_size) + "data" = max(10000, local.data_pd_size * 2) + "log" = max(10000, local.log_pd_size * 2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "worker" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "backup" = max(10000, 2 * local.backup_pd_size) } hdb_iops_map = { - "data" = var.hyperdisk_balanced_iops_default - "log" = var.hyperdisk_balanced_iops_default - "shared" = null - "usrsap" = null + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null "unified" = var.hyperdisk_balanced_iops_default - "worker" = var.hyperdisk_balanced_iops_default - "backup" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default + "backup" = var.hyperdisk_balanced_iops_default } null_iops_map = { - "data" = null - "log" = null - "shared" = null - "usrsap" = null + "data" = null + "log" = null + "shared" = null + "usrsap" = null "unified" = null - "worker" = null - "backup" = null + "worker" = null + "backup" = null } iops_map = { - "pd-ssd" = local.null_iops_map - "pd-balanced" = local.null_iops_map - "pd-extreme" = local.hdx_iops_map + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map "hyperdisk-balanced" = local.hdb_iops_map - "hyperdisk-extreme" = local.hdx_iops_map + "hyperdisk-extreme" = local.hdx_iops_map } - 
final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override - final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override - final_shared_iops = var.shared_disk_iops_override == null ? local.iops_map[local.final_shared_disk_type]["shared"] : var.shared_disk_iops_override - final_usrsap_iops = var.usrsap_disk_iops_override == null ? local.iops_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_iops_override - final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[var.disk_type]["unified"] : var.unified_disk_iops_override - final_backup_iops = var.backup_disk_iops_override == null ? local.iops_map[var.backup_disk_type]["backup"] : var.backup_disk_iops_override + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_shared_iops = var.shared_disk_iops_override == null ? local.iops_map[local.final_shared_disk_type]["shared"] : var.shared_disk_iops_override + final_usrsap_iops = var.usrsap_disk_iops_override == null ? local.iops_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[local.final_disk_type]["unified"] : var.unified_disk_iops_override + final_backup_iops = var.backup_disk_iops_override == null ? local.iops_map[local.final_backup_disk_type]["backup"] : var.backup_disk_iops_override - # THROUGHPUT + # Disk throughput MB/s hdb_throughput_map = { - "data" = var.hyperdisk_balanced_throughput_default - "log" = var.hyperdisk_balanced_throughput_default - "shared" = null - "usrsap" = null + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default + "shared" = null + "usrsap" = null "unified" = var.hyperdisk_balanced_throughput_default - "worker" = var.hyperdisk_balanced_throughput_default - "backup" = var.hyperdisk_balanced_throughput_default + "worker" = var.hyperdisk_balanced_throughput_default + "backup" = var.hyperdisk_balanced_throughput_default } null_throughput_map = { - "data" = null - "log" = null - "shared" = null - "usrsap" = null + "data" = null + "log" = null + "shared" = null + "usrsap" = null "unified" = null - "worker" = null - "backup" = null + "worker" = null + "backup" = null } throughput_map = { - "pd-ssd" = local.null_throughput_map - "pd-balanced" = local.null_throughput_map - "pd-extreme" = local.null_throughput_map + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map "hyperdisk-balanced" = local.hdb_throughput_map - "hyperdisk-extreme" = local.null_throughput_map + "hyperdisk-extreme" = local.null_throughput_map } - final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override - final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override - final_shared_throughput = var.shared_disk_throughput_override == null ? 
local.throughput_map[local.final_shared_disk_type]["shared"] : var.shared_disk_throughput_override - final_usrsap_throughput = var.usrsap_disk_throughput_override == null ? local.throughput_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_throughput_override - final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[var.disk_type]["unified"] : var.unified_disk_throughput_override - final_backup_throughput = var.backup_disk_throughput_override == null ? local.throughput_map[var.backup_disk_type]["backup"] : var.backup_disk_throughput_override + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_shared_throughput = var.shared_disk_throughput_override == null ? local.throughput_map[local.final_shared_disk_type]["shared"] : var.shared_disk_throughput_override + final_usrsap_throughput = var.usrsap_disk_throughput_override == null ? local.throughput_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[local.final_disk_type]["unified"] : var.unified_disk_throughput_override + final_backup_throughput = var.backup_disk_throughput_override == null ? local.throughput_map[local.final_backup_disk_type]["backup"] : var.backup_disk_throughput_override sap_vip_solution = "ILB" sap_hc_port = 60000 + var.sap_hana_instance_number @@ -268,35 +283,46 @@ locals { # HA Scaleout features mm_partially_defined = (var.majority_maker_instance_name != "") || (var.majority_maker_machine_type != "") || (var.majority_maker_zone != "") - mm_fully_defined = (var.majority_maker_instance_name != "") && (var.majority_maker_machine_type != "") && (var.majority_maker_zone != "") - mm_zone_split = split("-", var.majority_maker_zone) - mm_region = length(local.mm_zone_split) < 3 ? "" : join("-", [local.mm_zone_split[0], local.mm_zone_split[1]]) + mm_fully_defined = (var.majority_maker_instance_name != "") && (var.majority_maker_machine_type != "") && (var.majority_maker_zone != "") + mm_zone_split = split("-", var.majority_maker_zone) + mm_region = length(local.mm_zone_split) < 3 ? 
"" : join("-", [local.mm_zone_split[0], local.mm_zone_split[1]]) } +# tflint-ignore: terraform_unused_declarations data "assert_test" "scaleout_needs_mm" { - test = (local.mm_partially_defined && var.sap_hana_scaleout_nodes > 0) || (!local.mm_partially_defined && var.sap_hana_scaleout_nodes == 0) + test = (local.mm_partially_defined && var.sap_hana_scaleout_nodes > 0) || (!local.mm_partially_defined && var.sap_hana_scaleout_nodes == 0) throw = "sap_hana_scaleout_nodes and all majority_maker variables must be specified together: majority_maker_instance_name, majority_maker_machine_type, majority_maker_zone" } - +# tflint-ignore: terraform_unused_declarations data "assert_test" "fully_specify_mm" { - test = !local.mm_partially_defined || local.mm_fully_defined + test = !local.mm_partially_defined || local.mm_fully_defined throw = "majority_maker_instance_name, majority_maker_machine_type, and majority_maker_zone must all be specified together" } - +# tflint-ignore: terraform_unused_declarations data "assert_test" "mm_region_check" { - test = !local.mm_fully_defined || local.mm_region == local.region + test = !local.mm_fully_defined || local.mm_region == local.region throw = "Majority maker must be in the same region as the primary and secondary instances" } - +# tflint-ignore: terraform_unused_declarations resource "validation_warning" "mm_zone_warning" { condition = (var.majority_maker_zone == var.primary_zone) || (var.majority_maker_zone == var.secondary_zone) - summary = "It is recommended that the Majority Maker exist in a separate zone but same region from the primary and secondary instances." + summary = "It is recommended that the Majority Maker exist in a separate zone but same region from the primary and secondary instances." } - +# tflint-ignore: terraform_unused_declarations data "assert_test" "no_rhel_with_scaleout" { - test = var.sap_hana_scaleout_nodes == 0 || ! can(regex("rhel", var.linux_image_project)) + test = var.sap_hana_scaleout_nodes == 0 || !can(regex("rhel", var.linux_image_project)) throw = "HANA HA Scaleout deployment is currently only supported on SLES operating systems." } +# tflint-ignore: terraform_unused_declarations +data "assert_test" "hyperdisk_with_native_bm" { + test = local.native_bm ? length(regexall("hyperdisk", local.final_disk_type)) > 0 : true + throw = "Native bare metal machines only work with hyperdisks. Set 'disk_type' accordingly, e.g. 'disk_type = hyperdisk-balanced'" +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "backup_hyperdisk_with_native_bm" { + test = local.native_bm && var.include_backup_disk ? (length(regexall("hyperdisk", local.final_backup_disk_type)) > 0) : true + throw = "Native bare metal machines only work with hyperdisks. Set 'backup_disk_type' accordingly, e.g. 'backup_disk_type = hyperdisk-balanced'" +} ################################################################################ # VIPs @@ -313,16 +339,16 @@ resource "google_compute_address" "sap_hana_ha_vm_ip" { } resource "google_compute_address" "sap_hana_ha_worker_vm_ip" { - count = var.sap_hana_scaleout_nodes * 2 - name = (count.index % 2) == 0 ? "${var.primary_instance_name}w${floor(count.index / 2) + 1}-vm-ip" : "${var.secondary_instance_name}w${floor(count.index / 2) + 1}-vm-ip" - subnetwork = local.subnetwork_uri + count = var.sap_hana_scaleout_nodes * 2 + name = (count.index % 2) == 0 ? 
"${var.primary_instance_name}w${floor(count.index / 2) + 1}-vm-ip" : "${var.secondary_instance_name}w${floor(count.index / 2) + 1}-vm-ip" + subnetwork = local.subnetwork_uri address_type = "INTERNAL" - region = local.region - project = var.project_id + region = local.region + project = var.project_id # The worker node IPs are all in one list, alternating between primary and secondary address = (count.index % 2) == 0 ? ( length(var.primary_worker_static_ips) > floor(count.index / 2) ? var.primary_worker_static_ips[floor(count.index / 2)] : "") : ( - length(var.secondary_worker_static_ips) > floor(count.index / 2) ? var.secondary_worker_static_ips[floor(count.index / 2)] : "") + length(var.secondary_worker_static_ips) > floor(count.index / 2) ? var.secondary_worker_static_ips[floor(count.index / 2)] : "") } ################################################################################ @@ -334,7 +360,7 @@ resource "google_compute_address" "sap_hana_ha_worker_vm_ip" { resource "google_compute_disk" "sap_hana_ha_primary_boot_disks" { count = var.sap_hana_scaleout_nodes + 1 name = count.index == 0 ? "${var.primary_instance_name}-boot" : "${var.primary_instance_name}w${count.index}-boot" - type = "pd-balanced" + type = local.native_bm ? "hyperdisk-balanced" : "pd-balanced" zone = var.primary_zone size = local.default_boot_size project = var.project_id @@ -348,66 +374,66 @@ resource "google_compute_disk" "sap_hana_ha_primary_boot_disks" { } } resource "google_compute_disk" "sap_hana_ha_primary_unified_disks" { - count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes + 1 : 0 - name = count.index == 0 ? "${var.primary_instance_name}-hana" : "${var.primary_instance_name}w${count.index}-hana" - type = var.disk_type - zone = var.primary_zone - size = local.unified_pd_size - project = var.project_id - provisioned_iops = local.final_unified_iops + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes + 1 : 0 + name = count.index == 0 ? "${var.primary_instance_name}-hana" : "${var.primary_instance_name}w${count.index}-hana" + type = local.final_disk_type + zone = var.primary_zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops provisioned_throughput = local.final_unified_throughput } # Split data/log/sap disks resource "google_compute_disk" "sap_hana_ha_primary_data_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = count.index == 0 ? "${var.primary_instance_name}-data" : "${var.primary_instance_name}w${count.index}-data" - type = local.final_data_disk_type - zone = var.primary_zone - size = local.data_pd_size - project = var.project_id - provisioned_iops = local.final_data_iops + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-data" : "${var.primary_instance_name}w${count.index}-data" + type = local.final_data_disk_type + zone = var.primary_zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops provisioned_throughput = local.final_data_throughput } resource "google_compute_disk" "sap_hana_ha_primary_log_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = count.index == 0 ? 
"${var.primary_instance_name}-log" : "${var.primary_instance_name}w${count.index}-log" - type = local.final_log_disk_type - zone = var.primary_zone - size = local.log_pd_size - project = var.project_id - provisioned_iops = local.final_log_iops + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-log" : "${var.primary_instance_name}w${count.index}-log" + type = local.final_log_disk_type + zone = var.primary_zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops provisioned_throughput = local.final_log_throughput } resource "google_compute_disk" "sap_hana_ha_primary_shared_disk" { - count = var.use_single_shared_data_log_disk ? 0 : 1 - name = "${var.primary_instance_name}-shared" - type = local.final_shared_disk_type - zone = var.primary_zone - size = local.shared_pd_size - project = var.project_id - provisioned_iops = local.final_shared_iops + count = var.use_single_shared_data_log_disk ? 0 : 1 + name = "${var.primary_instance_name}-shared" + type = local.final_shared_disk_type + zone = var.primary_zone + size = local.shared_pd_size + project = var.project_id + provisioned_iops = local.final_shared_iops provisioned_throughput = local.final_shared_throughput } resource "google_compute_disk" "sap_hana_ha_primary_usrsap_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = count.index == 0 ? "${var.primary_instance_name}-usrsap" : "${var.primary_instance_name}w${count.index}-usrsap" - type = local.final_usrsap_disk_type - zone = var.primary_zone - size = local.usrsap_pd_size - project = var.project_id - provisioned_iops = local.final_usrsap_iops + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-usrsap" : "${var.primary_instance_name}w${count.index}-usrsap" + type = local.final_usrsap_disk_type + zone = var.primary_zone + size = local.usrsap_pd_size + project = var.project_id + provisioned_iops = local.final_usrsap_iops provisioned_throughput = local.final_usrsap_throughput } resource "google_compute_disk" "sap_hana_ha_primary_backup_disk" { - count = var.include_backup_disk ? 1 : 0 - name = "${var.primary_instance_name}-backup" - type = var.backup_disk_type - zone = var.primary_zone - size = local.backup_pd_size - project = var.project_id - provisioned_iops = local.final_backup_iops + count = var.include_backup_disk ? 1 : 0 + name = "${var.primary_instance_name}-backup" + type = local.final_backup_disk_type + zone = var.primary_zone + size = local.backup_pd_size + project = var.project_id + provisioned_iops = local.final_backup_iops provisioned_throughput = local.final_backup_throughput } @@ -429,35 +455,42 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { source = google_compute_disk.sap_hana_ha_primary_boot_disks[0].self_link } - dynamic attached_disk { + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [1] : [] content { device_name = google_compute_disk.sap_hana_ha_primary_unified_disks[0].name source = google_compute_disk.sap_hana_ha_primary_unified_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? 
[] : [1] content { device_name = google_compute_disk.sap_hana_ha_primary_data_disks[0].name source = google_compute_disk.sap_hana_ha_primary_data_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_ha_primary_log_disks[0].name source = google_compute_disk.sap_hana_ha_primary_log_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_ha_primary_shared_disk[0].name source = google_compute_disk.sap_hana_ha_primary_shared_disk[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_ha_primary_usrsap_disks[0].name @@ -465,7 +498,7 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.include_backup_disk ? [1] : [] content { device_name = google_compute_disk.sap_hana_ha_primary_backup_disk[0].name @@ -476,7 +509,7 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { network_interface { subnetwork = local.subnetwork_uri - network_ip = google_compute_address.sap_hana_ha_vm_ip.0.address + network_ip = google_compute_address.sap_hana_ha_vm_ip[0].address nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config @@ -540,6 +573,7 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" sap_hana_data_disk_type = local.final_data_disk_type enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" }, local.wlm_metadata @@ -563,35 +597,42 @@ resource "google_compute_instance" "sap_hana_ha_primary_workers" { boot_disk { auto_delete = true device_name = "boot" - source = google_compute_disk.sap_hana_ha_primary_boot_disks[count.index+1].self_link + source = google_compute_disk.sap_hana_ha_primary_boot_disks[count.index + 1].self_link } - dynamic attached_disk { + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [1] : [] content { - device_name = google_compute_disk.sap_hana_ha_primary_unified_disks[count.index+1].name - source = google_compute_disk.sap_hana_ha_primary_unified_disks[count.index+1].self_link + device_name = google_compute_disk.sap_hana_ha_primary_unified_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_primary_unified_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { - device_name = google_compute_disk.sap_hana_ha_primary_data_disks[count.index+1].name - source = google_compute_disk.sap_hana_ha_primary_data_disks[count.index+1].self_link + device_name = google_compute_disk.sap_hana_ha_primary_data_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_primary_data_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? 
[] : [1] content { - device_name = google_compute_disk.sap_hana_ha_primary_log_disks[count.index+1].name - source = google_compute_disk.sap_hana_ha_primary_log_disks[count.index+1].self_link + device_name = google_compute_disk.sap_hana_ha_primary_log_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_primary_log_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { - device_name = google_compute_disk.sap_hana_ha_primary_usrsap_disks[count.index+1].name - source = google_compute_disk.sap_hana_ha_primary_usrsap_disks[count.index+1].self_link + device_name = google_compute_disk.sap_hana_ha_primary_usrsap_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_primary_usrsap_disks[count.index + 1].self_link } } can_ip_forward = var.can_ip_forward @@ -662,6 +703,7 @@ resource "google_compute_instance" "sap_hana_ha_primary_workers" { sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" }, local.wlm_metadata @@ -683,7 +725,7 @@ resource "google_compute_instance" "sap_hana_ha_primary_workers" { resource "google_compute_disk" "sap_hana_ha_secondary_boot_disks" { count = var.sap_hana_scaleout_nodes + 1 name = count.index == 0 ? "${var.secondary_instance_name}-boot" : "${var.secondary_instance_name}w${count.index}-boot" - type = "pd-balanced" + type = local.native_bm ? "hyperdisk-balanced" : "pd-balanced" zone = var.secondary_zone size = local.default_boot_size project = var.project_id @@ -697,66 +739,66 @@ resource "google_compute_disk" "sap_hana_ha_secondary_boot_disks" { } } resource "google_compute_disk" "sap_hana_ha_secondary_unified_disks" { - count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes + 1 : 0 - name = count.index == 0 ? "${var.secondary_instance_name}-hana" : "${var.secondary_instance_name}w${count.index}-hana" - type = var.disk_type - zone = var.secondary_zone - size = local.unified_pd_size - project = var.project_id - provisioned_iops = local.final_unified_iops + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes + 1 : 0 + name = count.index == 0 ? "${var.secondary_instance_name}-hana" : "${var.secondary_instance_name}w${count.index}-hana" + type = local.final_disk_type + zone = var.secondary_zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops provisioned_throughput = local.final_unified_throughput } # Split data/log/sap disks resource "google_compute_disk" "sap_hana_ha_secondary_data_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = count.index == 0 ? "${var.secondary_instance_name}-data" : "${var.secondary_instance_name}w${count.index}-data" - type = local.final_data_disk_type - zone = var.secondary_zone - size = local.data_pd_size - project = var.project_id - provisioned_iops = local.final_data_iops + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? 
"${var.secondary_instance_name}-data" : "${var.secondary_instance_name}w${count.index}-data" + type = local.final_data_disk_type + zone = var.secondary_zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops provisioned_throughput = local.final_data_throughput } resource "google_compute_disk" "sap_hana_ha_secondary_log_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = count.index == 0 ? "${var.secondary_instance_name}-log" : "${var.secondary_instance_name}w${count.index}-log" - type = local.final_log_disk_type - zone = var.secondary_zone - size = local.log_pd_size - project = var.project_id - provisioned_iops = local.final_log_iops + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-log" : "${var.secondary_instance_name}w${count.index}-log" + type = local.final_log_disk_type + zone = var.secondary_zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops provisioned_throughput = local.final_log_throughput } resource "google_compute_disk" "sap_hana_ha_secondary_shared_disk" { - count = var.use_single_shared_data_log_disk ? 0 : 1 - name = "${var.secondary_instance_name}-shared" - type = local.final_shared_disk_type - zone = var.secondary_zone - size = local.shared_pd_size - project = var.project_id - provisioned_iops = local.final_shared_iops + count = var.use_single_shared_data_log_disk ? 0 : 1 + name = "${var.secondary_instance_name}-shared" + type = local.final_shared_disk_type + zone = var.secondary_zone + size = local.shared_pd_size + project = var.project_id + provisioned_iops = local.final_shared_iops provisioned_throughput = local.final_shared_throughput } resource "google_compute_disk" "sap_hana_ha_secondary_usrsap_disks" { - count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 - name = count.index == 0 ? "${var.secondary_instance_name}-usrsap" : "${var.secondary_instance_name}w${count.index}-usrsap" - type = local.final_usrsap_disk_type - zone = var.secondary_zone - size = local.usrsap_pd_size - project = var.project_id - provisioned_iops = local.final_usrsap_iops + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-usrsap" : "${var.secondary_instance_name}w${count.index}-usrsap" + type = local.final_usrsap_disk_type + zone = var.secondary_zone + size = local.usrsap_pd_size + project = var.project_id + provisioned_iops = local.final_usrsap_iops provisioned_throughput = local.final_usrsap_throughput } resource "google_compute_disk" "sap_hana_ha_secondary_backup_disk" { - count = var.include_backup_disk ? 1 : 0 - name = "${var.secondary_instance_name}-backup" - type = var.backup_disk_type - zone = var.secondary_zone - size = local.backup_pd_size - project = var.project_id - provisioned_iops = local.final_backup_iops + count = var.include_backup_disk ? 
1 : 0 + name = "${var.secondary_instance_name}-backup" + type = local.final_backup_disk_type + zone = var.secondary_zone + size = local.backup_pd_size + project = var.project_id + provisioned_iops = local.final_backup_iops provisioned_throughput = local.final_backup_throughput } @@ -777,35 +819,42 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { source = google_compute_disk.sap_hana_ha_secondary_boot_disks[0].self_link } - dynamic attached_disk { + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [1] : [] content { device_name = google_compute_disk.sap_hana_ha_secondary_unified_disks[0].name source = google_compute_disk.sap_hana_ha_secondary_unified_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_ha_secondary_data_disks[0].name source = google_compute_disk.sap_hana_ha_secondary_data_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_ha_secondary_log_disks[0].name source = google_compute_disk.sap_hana_ha_secondary_log_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_ha_secondary_shared_disk[0].name source = google_compute_disk.sap_hana_ha_secondary_shared_disk[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[0].name @@ -813,7 +862,7 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.include_backup_disk ? [1] : [] content { device_name = google_compute_disk.sap_hana_ha_secondary_backup_disk[0].name @@ -825,8 +874,8 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { network_interface { subnetwork = local.subnetwork_uri - network_ip = google_compute_address.sap_hana_ha_vm_ip.1.address - nic_type = var.nic_type == "" ? null : var.nic_type + network_ip = google_compute_address.sap_hana_ha_vm_ip[1].address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -887,6 +936,7 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes majority_maker_instance_name = local.mm_fully_defined ? 
var.majority_maker_instance_name : "" enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" }, local.wlm_metadata @@ -899,7 +949,7 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { } resource "google_compute_instance" "sap_hana_ha_secondary_workers" { - count = var.sap_hana_scaleout_nodes + count = var.sap_hana_scaleout_nodes name = "${var.secondary_instance_name}w${count.index + 1}" machine_type = var.machine_type zone = var.secondary_zone @@ -910,35 +960,42 @@ resource "google_compute_instance" "sap_hana_ha_secondary_workers" { boot_disk { auto_delete = true device_name = "boot" - source = google_compute_disk.sap_hana_ha_secondary_boot_disks[count.index+1].self_link + source = google_compute_disk.sap_hana_ha_secondary_boot_disks[count.index + 1].self_link + } + + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [1] : [] content { - device_name = google_compute_disk.sap_hana_ha_secondary_unified_disks[count.index+1].name - source = google_compute_disk.sap_hana_ha_secondary_unified_disks[count.index+1].self_link + device_name = google_compute_disk.sap_hana_ha_secondary_unified_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_secondary_unified_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { - device_name = google_compute_disk.sap_hana_ha_secondary_data_disks[count.index+1].name - source = google_compute_disk.sap_hana_ha_secondary_data_disks[count.index+1].self_link + device_name = google_compute_disk.sap_hana_ha_secondary_data_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_secondary_data_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { - device_name = google_compute_disk.sap_hana_ha_secondary_log_disks[count.index+1].name - source = google_compute_disk.sap_hana_ha_secondary_log_disks[count.index+1].self_link + device_name = google_compute_disk.sap_hana_ha_secondary_log_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_secondary_log_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_shared_data_log_disk ? [] : [1] content { - device_name = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[count.index+1].name - source = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[count.index+1].self_link + device_name = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[count.index + 1].self_link } } @@ -948,7 +1005,7 @@ resource "google_compute_instance" "sap_hana_ha_secondary_workers" { subnetwork = local.subnetwork_uri # The worker node IPs are all in one list, alternating between primary and secondary network_ip = google_compute_address.sap_hana_ha_worker_vm_ip[count.index * 2 + 1].address - nic_type = var.nic_type == "" ? null : var.nic_type + nic_type = var.nic_type == "" ? 
null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -1009,6 +1066,7 @@ resource "google_compute_instance" "sap_hana_ha_secondary_workers" { sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" }, local.wlm_metadata @@ -1105,7 +1163,7 @@ resource "google_compute_firewall" "sap_hana_ha_vpc_firewall" { target_tags = ["sap-${local.healthcheck_name}-port"] allow { protocol = "tcp" - ports = ["${local.sap_hc_port}"] + ports = [local.sap_hc_port] } } @@ -1114,7 +1172,7 @@ resource "google_compute_firewall" "sap_hana_ha_vpc_firewall" { ################################################################################ resource "google_compute_disk" "sap_majority_maker_boot_disk" { - count = local.mm_fully_defined ? 1 : 0 + count = local.mm_fully_defined ? 1 : 0 name = "${var.majority_maker_instance_name}-boot" type = "pd-balanced" zone = var.majority_maker_zone @@ -1139,11 +1197,11 @@ resource "google_compute_address" "sap_hana_majority_maker_vm_ip" { } resource "google_compute_instance" "sap_majority_maker_instance" { - count = local.mm_fully_defined ? 1 : 0 - name = var.majority_maker_instance_name + count = local.mm_fully_defined ? 1 : 0 + name = var.majority_maker_instance_name machine_type = var.majority_maker_machine_type - zone = var.majority_maker_zone - project = var.project_id + zone = var.majority_maker_zone + project = var.project_id min_cpu_platform = lookup(local.cpu_platform_map, var.majority_maker_machine_type, "Automatic") boot_disk { @@ -1155,8 +1213,8 @@ resource "google_compute_instance" "sap_majority_maker_instance" { can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri - network_ip = google_compute_address.sap_hana_majority_maker_vm_ip.0.address - nic_type = var.nic_type == "" ? null : var.nic_type + network_ip = google_compute_address.sap_hana_majority_maker_vm_ip[0].address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -1206,7 +1264,7 @@ resource "google_compute_instance" "sap_majority_maker_instance" { template-type = "TERRAFORM" }, local.wlm_metadata - ) + ) lifecycle { # Ignore changes in the instance metadata, since it is modified by the SAP startup script. diff --git a/modules/sap_hana_ha/outputs.tf b/modules/sap_hana_ha/outputs.tf index c920b284..e2023a3d 100644 --- a/modules/sap_hana_ha/outputs.tf +++ b/modules/sap_hana_ha/outputs.tf @@ -19,7 +19,7 @@ output "sap_hana_ha_primary_instance_self_link" { } output "sap_hana_ha_primary_worker_self_links" { description = "Self-link for the worker nodes in the primary SAP HANA HA instance." - value = google_compute_instance.sap_hana_ha_primary_workers.*.self_link + value = google_compute_instance.sap_hana_ha_primary_workers[*].self_link } output "sap_hana_ha_secondary_instance_self_link" { description = "Self-link for the secondary SAP HANA HA instance created." @@ -27,13 +27,13 @@ output "sap_hana_ha_secondary_instance_self_link" { } output "sap_hana_ha_secondary_worker_self_links" { description = "Self-link for the worker nodes in the secondary SAP HANA HA instance." 
- value = google_compute_instance.sap_hana_ha_secondary_workers.*.self_link + value = google_compute_instance.sap_hana_ha_secondary_workers[*].self_link } output "sap_hana_ha_loadbalander_link" { description = "Link to the optional load balancer" - value = google_compute_region_backend_service.sap_hana_ha_loadbalancer.*.self_link + value = google_compute_region_backend_service.sap_hana_ha_loadbalancer[*].self_link } output "sap_hana_ha_firewall_link" { description = "Link to the optional fire wall" - value = google_compute_firewall.sap_hana_ha_vpc_firewall.*.self_link + value = google_compute_firewall.sap_hana_ha_vpc_firewall[*].self_link } diff --git a/modules/sap_hana_ha/variables.tf b/modules/sap_hana_ha/variables.tf index 650b4c81..fe96bdbb 100644 --- a/modules/sap_hana_ha/variables.tf +++ b/modules/sap_hana_ha/variables.tf @@ -70,10 +70,10 @@ variable "sap_hana_deployment_bucket" { type = string description = "The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." validation { - condition = (! (length(regexall( "gs:", var.sap_hana_deployment_bucket)) > 0)) + condition = (!(length(regexall("gs:", var.sap_hana_deployment_bucket)) > 0)) error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://." } - default = "" + default = "" } variable "sap_hana_sid" { @@ -263,23 +263,23 @@ variable "nic_type" { condition = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type) error_message = "The nic_type must be either GVNIC or VIRTIO_NET." } - default = "" + default = "" } variable "disk_type" { - type = string - description = "Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details." + type = string + description = "Optional - The default disk type to use for disk(s) containing log and data volumes. The default is hyperdisk-extreme for native bare metal machines and pd-ssd otherwise. Not all disk are supported on all machine types - see https://cloud.google.com/compute/docs/disks/ for details." validation { - condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) + condition = contains(["", "pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." } - default = "pd-ssd" + default = "" } variable "use_single_shared_data_log_disk" { - type = bool + type = bool description = "Optional - By default three separate disk for data, logs, and shared will be made. If set to true, one disk will be used instead." - default = false + default = false } variable "include_backup_disk" { @@ -289,27 +289,27 @@ variable "include_backup_disk" { } variable "sap_hana_scaleout_nodes" { - type = number + type = number description = "Optional - Specify to add scaleout nodes to both HA instances." - default = 0 + default = 0 } variable "majority_maker_instance_name" { - type = string + type = string description = "Optional - Name to use for the Majority Maker instance. Must be provided if scaleout_nodes > 0." 
- default = "" + default = "" } variable "majority_maker_machine_type" { - type = string + type = string description = "Optional - The machine type to use for the Majority Maker instance. Must be provided if scaleout_nodes > 0." - default = "" + default = "" } variable "majority_maker_zone" { - type = string + type = string description = "Optional - The zone in which the Majority Maker instance will be deployed. Must be provided if scaleout_nodes > 0. It is recommended for this to be different from the zones the primary and secondary instance are deployed in." - default = "" + default = "" } variable "primary_static_ip" { @@ -319,7 +319,7 @@ variable "primary_static_ip" { condition = var.primary_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.primary_static_ip)) error_message = "The primary_static_ip must be a valid IP address." } - default = "" + default = "" } variable "secondary_static_ip" { @@ -329,7 +329,7 @@ variable "secondary_static_ip" { condition = var.secondary_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.secondary_static_ip)) error_message = "The secondary_static_ip must be a valid IP address." } - default = "" + default = "" } variable "primary_worker_static_ips" { @@ -341,7 +341,7 @@ variable "primary_worker_static_ips" { ]) error_message = "All primary_worker_static_ips must be valid IP addresses." } - default = [] + default = [] } variable "secondary_worker_static_ips" { @@ -353,30 +353,30 @@ variable "secondary_worker_static_ips" { ]) error_message = "All secondary_worker_static_ips must be valid IP addresses." } - default = [] + default = [] } variable "backup_disk_type" { type = string - description = "Optional - The default is pd-balanced, only used if a backup disk is needed." - default = "pd-balanced" + description = "Optional - The default is hyperdisk-balanced for native bare metal machines and pd-balanced otherwise, only used if a backup disk is needed." validation { - condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", "pd-standard"], var.backup_disk_type) + condition = contains(["", "pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.backup_disk_type) error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-standard, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." } + default = "" } variable "hyperdisk_balanced_iops_default" { - type = number + type = number description = "Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." - default = 3000 + default = 3000 } variable "hyperdisk_balanced_throughput_default" { - type = number + type = number description = "Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." - default = 750 + default = 750 } variable "enable_fast_restart" { @@ -390,7 +390,7 @@ variable "enable_fast_restart" { # variable "data_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." 
validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) @@ -399,7 +399,7 @@ variable "data_disk_type_override" { default = "" } variable "log_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) @@ -408,7 +408,7 @@ variable "log_disk_type_override" { default = "" } variable "shared_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the shared disk." validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.shared_disk_type_override) @@ -417,7 +417,7 @@ variable "shared_disk_type_override" { default = "" } variable "usrsap_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the /usr/sap disk." validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.usrsap_disk_type_override) @@ -426,90 +426,90 @@ variable "usrsap_disk_type_override" { default = "" } variable "unified_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary disk(s), that is based off of the machine_type." - default = null + default = null } variable "data_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." - default = null + default = null } variable "log_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine_type." - default = null + default = null } variable "shared_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine_type." - default = null + default = null } variable "usrsap_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine_type." - default = null + default = null } variable "unified_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." 
- default = null + default = null } variable "data_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "log_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "shared_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "usrsap_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "backup_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "unified_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "data_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "log_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "shared_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "usrsap_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." 
- default = null + default = null } variable "backup_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "wlm_deployment_name" { @@ -527,24 +527,24 @@ variable "is_work_load_management_deployment" { variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" } variable "worker_startup_url" { type = string description = "Startup script to be executed when the worker VM boots, should not be overridden." - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_ha/startup_worker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_worker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } variable "majority_maker_startup_url" { type = string - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_ha/startup_majority_maker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_majority_maker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } variable "can_ip_forward" { diff --git a/modules/sap_hana_ha/versions.tf b/modules/sap_hana_ha/versions.tf index 73afcc7d..ba0cb846 100644 --- a/modules/sap_hana_ha/versions.tf +++ b/modules/sap_hana_ha/versions.tf @@ -16,13 +16,16 @@ terraform { required_version = ">=0.12.6" required_providers { - google = {} + google = { + source = "hashicorp/google" + version = ">= 4.0.0, < 6" + } assert = { - source = "bwoznicki/assert" + source = "bwoznicki/assert" version = "0.0.1" } validation = { - source = "tlkamp/validation" + source = "tlkamp/validation" version = "1.0.0" } } diff --git a/modules/sap_hana_scaleout/main.tf b/modules/sap_hana_scaleout/main.tf index 0f0b142c..e7b0ed4c 100644 --- 
a/modules/sap_hana_scaleout/main.tf +++ b/modules/sap_hana_scaleout/main.tf @@ -17,8 +17,8 @@ # Terraform SAP HANA Scaleout for Google Cloud # # -# Version: 2.0.202402230649 -# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 +# Version: 2.0.202404101403 +# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 # ################################################################################ @@ -26,155 +26,171 @@ ################################################################################ locals { mem_size_map = { - "n1-highmem-32" = 208 - "n1-highmem-64" = 416 - "n1-highmem-96" = 624 - "n1-megamem-96" = 1433 - "n2-highmem-32" = 256 - "n2-highmem-48" = 384 - "n2-highmem-64" = 512 - "n2-highmem-80" = 640 - "n2-highmem-96" = 768 - "n2-highmem-128" = 864 - "n1-ultramem-40" = 961 - "n1-ultramem-80" = 1922 - "n1-ultramem-160" = 3844 - "m1-megamem-96" = 1433 - "m1-ultramem-40" = 961 - "m1-ultramem-80" = 1922 - "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5888 - "m2-megamem-416" = 5888 - "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11744 - "m3-megamem-64" = 976 - "m3-megamem-128" = 1952 - "m3-ultramem-32" = 976 - "m3-ultramem-64" = 1952 - "m3-ultramem-128" = 3904 - "c3-standard-44" = 176 - "c3-highmem-44" = 352 - "c3-highmem-88" = 704 - "c3-highmem-176" = 1408 + "n1-highmem-32" = 208 + "n1-highmem-64" = 416 + "n1-highmem-96" = 624 + "n1-megamem-96" = 1433 + "n2-highmem-32" = 256 + "n2-highmem-48" = 384 + "n2-highmem-64" = 512 + "n2-highmem-80" = 640 + "n2-highmem-96" = 768 + "n2-highmem-128" = 864 + "n1-ultramem-40" = 961 + "n1-ultramem-80" = 1922 + "n1-ultramem-160" = 3844 + "m1-megamem-96" = 1433 + "m1-ultramem-40" = 961 + "m1-ultramem-80" = 1922 + "m1-ultramem-160" = 3844 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 + "m2-hypermem-416" = 8832 + "m2-ultramem-416" = 11744 + "m3-megamem-64" = 976 + "m3-megamem-128" = 1952 + "m3-ultramem-32" = 976 + "m3-ultramem-64" = 1952 + "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 + "c3-standard-192-metal" = 768 + "c3-highcpu-192-metal" = 512 + "c3-highmem-192-metal" = 1536 + "x4-megamem-960-metal" = 16384 + "x4-megamem-1440-metal" = 24576 + "x4-megamem-1920-metal" = 32768 } cpu_platform_map = { - "n1-highmem-32" = "Intel Broadwell" - "n1-highmem-64" = "Intel Broadwell" - "n1-highmem-96" = "Intel Skylake" - "n1-megamem-96" = "Intel Skylake" - "n2-highmem-32" = "Automatic" - "n2-highmem-48" = "Automatic" - "n2-highmem-64" = "Automatic" - "n2-highmem-80" = "Automatic" - "n2-highmem-96" = "Automatic" - "n2-highmem-128" = "Automatic" - "n1-ultramem-40" = "Automatic" - "n1-ultramem-80" = "Automatic" - "n1-ultramem-160" = "Automatic" - "m1-megamem-96" = "Intel Skylake" - "m1-ultramem-40" = "Automatic" - "m1-ultramem-80" = "Automatic" - "m1-ultramem-160" = "Automatic" - "m2-ultramem-208" = "Automatic" - "m2-megamem-416" = "Automatic" - "m2-hypermem-416" = "Automatic" - "m2-ultramem-416" = "Automatic" - "m3-megamem-64" = "Automatic" - "m3-megamem-128" = "Automatic" - "m3-ultramem-32" = "Automatic" - "m3-ultramem-64" = "Automatic" - "m3-ultramem-128" = "Automatic" - "c3-standard-44" = "Automatic" - "c3-highmem-44" = "Automatic" - "c3-highmem-88" = "Automatic" - "c3-highmem-176" = "Automatic" - } - - # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. 
+ "n1-highmem-32" = "Intel Broadwell" + "n1-highmem-64" = "Intel Broadwell" + "n1-highmem-96" = "Intel Skylake" + "n1-megamem-96" = "Intel Skylake" + "n2-highmem-32" = "Automatic" + "n2-highmem-48" = "Automatic" + "n2-highmem-64" = "Automatic" + "n2-highmem-80" = "Automatic" + "n2-highmem-96" = "Automatic" + "n2-highmem-128" = "Automatic" + "n1-ultramem-40" = "Automatic" + "n1-ultramem-80" = "Automatic" + "n1-ultramem-160" = "Automatic" + "m1-megamem-96" = "Intel Skylake" + "m1-ultramem-40" = "Automatic" + "m1-ultramem-80" = "Automatic" + "m1-ultramem-160" = "Automatic" + "m2-ultramem-208" = "Automatic" + "m2-megamem-416" = "Automatic" + "m2-hypermem-416" = "Automatic" + "m2-ultramem-416" = "Automatic" + "m3-megamem-64" = "Automatic" + "m3-megamem-128" = "Automatic" + "m3-ultramem-32" = "Automatic" + "m3-ultramem-64" = "Automatic" + "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + "c3-standard-192-metal" = "Automatic" + "c3-highcpu-192-metal" = "Automatic" + "c3-highmem-192-metal" = "Automatic" + "x4-megamem-960-metal" = "Automatic" + "x4-megamem-1440-metal" = "Automatic" + "x4-megamem-1920-metal" = "Automatic" + } + + native_bm = length(regexall("metal", var.machine_type)) > 0 + + # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. # All 'over provisioned' capacity is to go onto the data disk. + final_disk_type = var.disk_type == "" ? (local.native_bm ? "hyperdisk-extreme" : "pd-ssd") : var.disk_type min_total_disk_map = { - "pd-ssd" = 550 - "pd-balanced" = 943 - "pd-extreme" = 0 + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 "hyperdisk-balanced" = 0 - "hyperdisk-extreme" = 0 + "hyperdisk-extreme" = 0 } - min_total_disk = local.min_total_disk_map[var.disk_type] + min_total_disk = local.min_total_disk_map[local.final_disk_type] - mem_size = lookup(local.mem_size_map, var.machine_type, 320) - hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) - hana_data_size_min = ceil(local.mem_size * 12 / 10) + mem_size = lookup(local.mem_size_map, var.machine_type, 320) + hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) + hana_data_size_min = ceil(local.mem_size * 12 / 10) - hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_log_size ) - pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + 1)) - - final_data_disk_type = var.data_disk_type_override == "" ? var.disk_type : var.data_disk_type_override - final_log_disk_type = var.log_disk_type_override == "" ? var.disk_type : var.log_disk_type_override + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_log_size) + pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + 1)) unified_pd_size = var.unified_disk_size_override == null ? ceil(local.pd_size) : var.unified_disk_size_override - data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override - log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override + + # Disk types + final_data_disk_type = var.data_disk_type_override == "" ? 
local.final_disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? local.final_disk_type : var.log_disk_type_override - # IOPS + # Disk IOPS hdx_iops_map = { - "data" = max(10000, local.data_pd_size*2) - "log" = max(10000, local.log_pd_size*2) - "shared" = null - "usrsap" = null - "unified" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) - "worker" = max(10000, local.data_pd_size*2) + max(10000, local.log_pd_size*2) + "data" = max(10000, local.data_pd_size * 2) + "log" = max(10000, local.log_pd_size * 2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "worker" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) } hdb_iops_map = { - "data" = var.hyperdisk_balanced_iops_default - "log" = var.hyperdisk_balanced_iops_default - "shared" = null - "usrsap" = null + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null "unified" = var.hyperdisk_balanced_iops_default - "worker" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default } null_iops_map = { - "data" = null - "log" = null - "shared" = null - "usrsap" = null + "data" = null + "log" = null + "shared" = null + "usrsap" = null "unified" = null - "worker" = null + "worker" = null } iops_map = { - "pd-ssd" = local.null_iops_map - "pd-balanced" = local.null_iops_map - "pd-extreme" = local.hdx_iops_map + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map "hyperdisk-balanced" = local.hdb_iops_map - "hyperdisk-extreme" = local.hdx_iops_map + "hyperdisk-extreme" = local.hdx_iops_map } - final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override - final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override - final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[var.disk_type]["unified"] : var.unified_disk_iops_override + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[local.final_disk_type]["unified"] : var.unified_disk_iops_override - # THROUGHPUT + # Disk throughput MB/s hdb_throughput_map = { - "data" = var.hyperdisk_balanced_throughput_default - "log" = var.hyperdisk_balanced_throughput_default + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default "unified" = var.hyperdisk_balanced_throughput_default } null_throughput_map = { - "data" = null - "log" = null + "data" = null + "log" = null "unified" = null } throughput_map = { - "pd-ssd" = local.null_throughput_map - "pd-balanced" = local.null_throughput_map - "pd-extreme" = local.null_throughput_map + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map "hyperdisk-balanced" = local.hdb_throughput_map - "hyperdisk-extreme" = local.null_throughput_map + "hyperdisk-extreme" = local.null_throughput_map } - final_data_throughput = var.data_disk_throughput_override == null ? 
local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override - final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override - final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[var.disk_type]["unified"] : var.unified_disk_throughput_override + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[local.final_disk_type]["unified"] : var.unified_disk_throughput_override primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url @@ -187,6 +203,13 @@ locals { "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}") } +# tflint-ignore: terraform_unused_declarations +data "assert_test" "hyperdisk_with_native_bm" { + test = local.native_bm ? length(regexall("hyperdisk", local.final_disk_type)) > 0 : true + throw = "Native bare metal machines only work with hyperdisks. Set 'disk_type' accordingly, e.g. 'disk_type = hyperdisk-balanced'" +} + + ################################################################################ # disks ################################################################################ @@ -194,7 +217,7 @@ resource "google_compute_disk" "sap_hana_scaleout_boot_disks" { # Need a disk for primary, worker nodes, standby nodes count = var.sap_hana_worker_nodes + var.sap_hana_standby_nodes + 1 name = count.index == 0 ? "${var.instance_name}-boot" : "${var.instance_name}w${count.index}-boot" - type = "pd-balanced" + type = local.native_bm ? "hyperdisk-balanced" : "pd-balanced" zone = var.zone size = 45 project = var.project_id @@ -210,35 +233,35 @@ resource "google_compute_disk" "sap_hana_scaleout_boot_disks" { resource "google_compute_disk" "sap_hana_scaleout_disks" { # Need a pd disk for primary, worker nodes - count = var.use_single_data_log_disk ? var.sap_hana_worker_nodes + 1 : 0 - name = format("${var.instance_name}-hana%05d", count.index + 1) - type = var.disk_type - zone = var.zone - size = local.unified_pd_size - project = var.project_id - provisioned_iops = local.final_unified_iops + count = var.use_single_data_log_disk ? var.sap_hana_worker_nodes + 1 : 0 + name = format("${var.instance_name}-hana%05d", count.index + 1) + type = local.final_disk_type + zone = var.zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops provisioned_throughput = local.final_unified_throughput } resource "google_compute_disk" "sap_hana_data_disks" { - count = var.use_single_data_log_disk ? 0 : var.sap_hana_worker_nodes + 1 - name = format("${var.instance_name}-data%05d", count.index + 1) - type = local.final_data_disk_type - zone = var.zone - size = local.data_pd_size - project = var.project_id - provisioned_iops = local.final_data_iops + count = var.use_single_data_log_disk ? 
0 : var.sap_hana_worker_nodes + 1 + name = format("${var.instance_name}-data%05d", count.index + 1) + type = local.final_data_disk_type + zone = var.zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops provisioned_throughput = local.final_data_throughput } resource "google_compute_disk" "sap_hana_log_disks" { - count = var.use_single_data_log_disk ? 0 : var.sap_hana_worker_nodes + 1 - name = format("${var.instance_name}-log%05d", count.index + 1) - type = local.final_log_disk_type - zone = var.zone - size = local.log_pd_size - project = var.project_id - provisioned_iops = local.final_log_iops + count = var.use_single_data_log_disk ? 0 : var.sap_hana_worker_nodes + 1 + name = format("${var.instance_name}-log%05d", count.index + 1) + type = local.final_log_disk_type + zone = var.zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops provisioned_throughput = local.final_log_throughput } @@ -291,21 +314,28 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { source = google_compute_disk.sap_hana_scaleout_boot_disks[0].self_link } - dynamic attached_disk { + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { for_each = var.use_single_data_log_disk ? [1] : [] content { device_name = google_compute_disk.sap_hana_scaleout_disks[0].name source = google_compute_disk.sap_hana_scaleout_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_data_disks[0].name source = google_compute_disk.sap_hana_data_disks[0].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_log_disks[0].name @@ -317,7 +347,7 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_vm_ip.address - nic_type = var.nic_type == "" ? null : var.nic_type + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip @@ -356,6 +386,7 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { sap_hana_deployment_bucket = var.sap_hana_deployment_bucket sap_hana_original_role = "master" sap_hana_sid = var.sap_hana_sid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid sap_hana_instance_number = var.sap_hana_instance_number sap_hana_sidadm_password = var.sap_hana_sidadm_password sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret @@ -369,6 +400,7 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { sap_hana_backup_nfs = var.sap_hana_backup_nfs use_single_data_log_disk = var.use_single_data_log_disk sap_hana_data_disk_type = local.final_data_disk_type + native_bm = local.native_bm template-type = "TERRAFORM" } @@ -394,21 +426,28 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" { source = google_compute_disk.sap_hana_scaleout_boot_disks[count.index + 1].self_link } - dynamic attached_disk { + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { for_each = var.use_single_data_log_disk ? 
[1] : [] content { device_name = google_compute_disk.sap_hana_scaleout_disks[count.index + 1].name source = google_compute_disk.sap_hana_scaleout_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_data_disks[count.index + 1].name source = google_compute_disk.sap_hana_data_disks[count.index + 1].self_link } } - dynamic attached_disk { + dynamic "attached_disk" { for_each = var.use_single_data_log_disk ? [] : [1] content { device_name = google_compute_disk.sap_hana_log_disks[count.index + 1].name @@ -420,7 +459,7 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_worker_ip[count.index].address - nic_type = var.nic_type == "" ? null : var.nic_type + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -457,6 +496,7 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" { sap_deployment_debug = var.sap_deployment_debug sap_hana_deployment_bucket = var.sap_hana_deployment_bucket sap_hana_sid = var.sap_hana_sid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid sap_hana_instance_number = var.sap_hana_instance_number sap_hana_scaleout_nodes = var.sap_hana_worker_nodes + var.sap_hana_standby_nodes @@ -471,6 +511,7 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" { sap_hana_worker_nodes = var.sap_hana_worker_nodes sap_hana_standby_nodes = var.sap_hana_standby_nodes use_single_data_log_disk = var.use_single_data_log_disk + native_bm = local.native_bm template-type = "TERRAFORM" } @@ -500,13 +541,19 @@ resource "google_compute_instance" "sap_hana_scaleout_standby_instances" { source = google_compute_disk.sap_hana_scaleout_boot_disks[count.index + var.sap_hana_worker_nodes + 1].self_link } + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_standby_ip[count.index].address - nic_type = var.nic_type == "" ? null : var.nic_type + nic_type = var.nic_type == "" ? 
null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -543,6 +590,7 @@ resource "google_compute_instance" "sap_hana_scaleout_standby_instances" { sap_deployment_debug = var.sap_deployment_debug sap_hana_deployment_bucket = var.sap_hana_deployment_bucket sap_hana_sid = var.sap_hana_sid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid sap_hana_instance_number = var.sap_hana_instance_number sap_hana_scaleout_nodes = var.sap_hana_worker_nodes + var.sap_hana_standby_nodes @@ -552,10 +600,12 @@ resource "google_compute_instance" "sap_hana_scaleout_standby_instances" { sap_hana_system_password = var.sap_hana_system_password sap_hana_system_password_secret = var.sap_hana_system_password_secret sap_hana_sidadm_uid = var.sap_hana_sidadm_uid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid sap_hana_shared_nfs = var.sap_hana_shared_nfs sap_hana_backup_nfs = var.sap_hana_backup_nfs sap_hana_worker_nodes = var.sap_hana_worker_nodes sap_hana_standby_nodes = var.sap_hana_standby_nodes + native_bm = local.native_bm template-type = "TERRAFORM" } diff --git a/modules/sap_hana_scaleout/outputs.tf b/modules/sap_hana_scaleout/outputs.tf index e832d6a3..e3d94e4f 100644 --- a/modules/sap_hana_scaleout/outputs.tf +++ b/modules/sap_hana_scaleout/outputs.tf @@ -19,9 +19,9 @@ output "sap_hana_primary_self_link" { } output "hana_scaleout_worker_self_links" { description = "List of self-links for the hana scaleout workers created" - value = google_compute_instance.sap_hana_scaleout_worker_instances.*.self_link + value = google_compute_instance.sap_hana_scaleout_worker_instances[*].self_link } output "hana_scaleout_standby_self_links" { description = "List of self-links for the hana scaleout standbys created" - value = google_compute_instance.sap_hana_scaleout_standby_instances.*.self_link + value = google_compute_instance.sap_hana_scaleout_standby_instances[*].self_link } diff --git a/modules/sap_hana_scaleout/variables.tf b/modules/sap_hana_scaleout/variables.tf index 8c212bde..f6e73844 100644 --- a/modules/sap_hana_scaleout/variables.tf +++ b/modules/sap_hana_scaleout/variables.tf @@ -65,10 +65,10 @@ variable "sap_hana_deployment_bucket" { type = string description = "The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." validation { - condition = (! (length(regexall( "gs:", var.sap_hana_deployment_bucket)) > 0)) + condition = (!(length(regexall("gs:", var.sap_hana_deployment_bucket)) > 0)) error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://." } - default = "" + default = "" } variable "sap_hana_instance_number" { @@ -145,7 +145,7 @@ variable "sap_hana_standby_nodes" { } variable "sap_hana_shared_nfs" { - type = string + type = string validation { condition = var.sap_hana_shared_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_shared_nfs)) error_message = "The sap_hana_shared_nfs must be an IP address followed by ':/' then some name." 
@@ -154,7 +154,7 @@ variable "sap_hana_shared_nfs" { } variable "sap_hana_backup_nfs" { - type = string + type = string validation { condition = var.sap_hana_backup_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_backup_nfs)) error_message = "The sap_hana_backup_nfs must be an IP address followed by ':/' then some name." @@ -230,11 +230,11 @@ variable "nic_type" { condition = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type) error_message = "The nic_type must be either GVNIC or VIRTIO_NET." } - default = "" + default = "" } variable "disk_type" { - type = string + type = string description = "Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details." validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) @@ -244,21 +244,21 @@ variable "disk_type" { } variable "use_single_data_log_disk" { - type = bool + type = bool description = "Optional - By default two separate disk for data and logs will be made. If set to true, one disk will be used instead." - default = false + default = false } variable "hyperdisk_balanced_iops_default" { - type = number + type = number description = "Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot disk)." - default = 3000 + default = 3000 } variable "hyperdisk_balanced_throughput_default" { - type = number + type = number description = "Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot disk)." - default = 750 + default = 750 } variable "vm_static_ip" { @@ -268,7 +268,7 @@ variable "vm_static_ip" { condition = var.vm_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.vm_static_ip)) error_message = "The vm_static_ip must be a valid IP address." } - default = "" + default = "" } variable "worker_static_ips" { @@ -280,7 +280,7 @@ variable "worker_static_ips" { ]) error_message = "All worker_static_ips must be valid IP addresses." } - default = [] + default = [] } variable "standby_static_ips" { @@ -292,14 +292,14 @@ variable "standby_static_ips" { ]) error_message = "All standby_static_ips must be valid IP addresses." } - default = [] + default = [] } # # DO NOT MODIFY unless instructed or aware of the implications of using those settings # variable "data_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) @@ -308,7 +308,7 @@ variable "data_disk_type_override" { default = "" } variable "log_disk_type_override" { - type = string + type = string description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." 
validation { condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) @@ -317,59 +317,59 @@ variable "log_disk_type_override" { default = "" } variable "unified_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary's unified disk, that is based off of the machine_type." - default = null + default = null } variable "data_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." - default = null + default = null } variable "log_disk_size_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine_type." - default = null + default = null } variable "unified_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "data_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "log_disk_iops_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "unified_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "data_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "log_disk_throughput_override" { - type = number + type = number description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." - default = null + default = null } variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." 
- default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_scaleout/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_scaleout/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_hana_scaleout/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_scaleout/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } diff --git a/modules/sap_hana_scaleout/versions.tf b/modules/sap_hana_scaleout/versions.tf index f543e7b0..bd9c34c5 100644 --- a/modules/sap_hana_scaleout/versions.tf +++ b/modules/sap_hana_scaleout/versions.tf @@ -16,6 +16,13 @@ terraform { required_version = ">=0.12.6" required_providers { - google = {} + google = { + source = "hashicorp/google" + version = ">= 4.0.0, < 6" + } + assert = { + source = "bwoznicki/assert" + version = "0.0.1" + } } } diff --git a/modules/sap_nw/main.tf b/modules/sap_nw/main.tf index 54448ebc..0432a784 100644 --- a/modules/sap_nw/main.tf +++ b/modules/sap_nw/main.tf @@ -16,8 +16,8 @@ # # Terraform SAP NW for Google Cloud # -# Version: 2.0.202402230649 -# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 +# Version: 2.0.202404101403 +# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 # ################################################################################ diff --git a/modules/sap_nw/variables.tf b/modules/sap_nw/variables.tf index b624b140..d68b508d 100644 --- a/modules/sap_nw/variables.tf +++ b/modules/sap_nw/variables.tf @@ -137,7 +137,7 @@ variable "post_deployment_script" { variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." 
- default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_nw/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" } variable "can_ip_forward" { diff --git a/modules/sap_nw/versions.tf b/modules/sap_nw/versions.tf index f543e7b0..fb459560 100644 --- a/modules/sap_nw/versions.tf +++ b/modules/sap_nw/versions.tf @@ -16,6 +16,9 @@ terraform { required_version = ">=0.12.6" required_providers { - google = {} + google = { + source = "hashicorp/google" + version = ">= 4.0.0, < 6" + } } } diff --git a/modules/sap_nw_ha/main.tf b/modules/sap_nw_ha/main.tf index 66e44460..b1898633 100644 --- a/modules/sap_nw_ha/main.tf +++ b/modules/sap_nw_ha/main.tf @@ -16,8 +16,8 @@ # # Terraform SAP NW HA for Google Cloud # -# Version: 2.0.202402230649 -# Build Hash: c745a89b214d491fa9b641e2fff78abfe9965016 +# Version: 2.0.202404101403 +# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 # ################################################################################ @@ -35,7 +35,7 @@ locals { sid = lower(var.sap_sid) hc_firewall_rule_name = var.hc_firewall_rule_name == "" ? "${local.sid}-hc-allow" : var.hc_firewall_rule_name - hc_network_tag = length(var.hc_network_tag) == 0 ? ["${local.hc_firewall_rule_name}"] : var.hc_network_tag + hc_network_tag = length(var.hc_network_tag) == 0 ? [local.hc_firewall_rule_name] : var.hc_network_tag sap_scs_instance_number = var.sap_scs_instance_number == "" ? "00" : var.sap_scs_instance_number scs_inst_group_name = var.scs_inst_group_name == "" ? 
"${local.sid}-scs-ig" : var.scs_inst_group_name @@ -158,7 +158,7 @@ resource "google_compute_instance" "scs_instance" { can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri - network_ip = google_compute_address.sap_nw_vm_ip.0.address + network_ip = google_compute_address.sap_nw_vm_ip[0].address # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -192,14 +192,14 @@ resource "google_compute_instance" "scs_instance" { sap_primary_instance = var.sap_primary_instance sap_primary_zone = var.sap_primary_zone scs_hc_port = local.scs_hc_port - scs_vip_address = google_compute_address.nw_vips.0.address + scs_vip_address = google_compute_address.nw_vips[0].address scs_vip_name = local.scs_vip_name # ERS settings sap_secondary_instance = var.sap_secondary_instance sap_secondary_zone = var.sap_secondary_zone ers_hc_port = local.ers_hc_port - ers_vip_address = google_compute_address.nw_vips.1.address + ers_vip_address = google_compute_address.nw_vips[1].address ers_vip_name = local.ers_vip_name # File system settings @@ -257,7 +257,7 @@ resource "google_compute_instance" "ers_instance" { can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri - network_ip = google_compute_address.sap_nw_vm_ip.1.address + network_ip = google_compute_address.sap_nw_vm_ip[1].address # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -291,14 +291,14 @@ resource "google_compute_instance" "ers_instance" { sap_primary_instance = var.sap_primary_instance sap_primary_zone = var.sap_primary_zone scs_hc_port = local.scs_hc_port - scs_vip_address = google_compute_address.nw_vips.0.address + scs_vip_address = google_compute_address.nw_vips[0].address scs_vip_name = local.scs_vip_name # ERS settings sap_secondary_instance = var.sap_secondary_instance sap_secondary_zone = var.sap_secondary_zone ers_hc_port = local.ers_hc_port - ers_vip_address = google_compute_address.nw_vips.1.address + ers_vip_address = google_compute_address.nw_vips[1].address ers_vip_name = local.ers_vip_name # File system settings @@ -343,7 +343,7 @@ resource "google_compute_address" "nw_vips" { resource "google_compute_instance_group" "nw_instance_groups" { count = 2 name = count.index == 0 ? local.scs_inst_group_name : local.ers_inst_group_name - instances = count.index == 0 ? google_compute_instance.scs_instance.*.self_link : google_compute_instance.ers_instance.*.self_link + instances = count.index == 0 ? google_compute_instance.scs_instance[*].self_link : google_compute_instance.ers_instance[*].self_link zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone project = var.project_id } @@ -391,7 +391,7 @@ resource "google_compute_region_backend_service" "nw_regional_backend_services" name = count.index == 0 ? 
local.scs_backend_svc_name : local.ers_backend_svc_name region = local.region load_balancing_scheme = "INTERNAL" - health_checks = [element(google_compute_health_check.nw_hc.*.id, count.index)] + health_checks = [element(google_compute_health_check.nw_hc[*].id, count.index)] project = var.project_id failover_policy { @@ -400,11 +400,11 @@ resource "google_compute_region_backend_service" "nw_regional_backend_services" failover_ratio = 1 } backend { - group = element(google_compute_instance_group.nw_instance_groups.*.id, count.index) + group = element(google_compute_instance_group.nw_instance_groups[*].id, count.index) failover = false } backend { - group = element(google_compute_instance_group.nw_instance_groups.*.id, 1 - count.index) + group = element(google_compute_instance_group.nw_instance_groups[*].id, 1 - count.index) failover = true } } @@ -415,10 +415,10 @@ resource "google_compute_region_backend_service" "nw_regional_backend_services" resource "google_compute_forwarding_rule" "nw_forwarding_rules" { count = 2 name = count.index == 0 ? local.scs_forw_rule_name : local.ers_forw_rule_name - ip_address = element(google_compute_address.nw_vips.*.address, count.index) + ip_address = element(google_compute_address.nw_vips[*].address, count.index) region = local.region load_balancing_scheme = "INTERNAL" - backend_service = element(google_compute_region_backend_service.nw_regional_backend_services.*.id, count.index) + backend_service = element(google_compute_region_backend_service.nw_regional_backend_services[*].id, count.index) all_ports = true subnetwork = local.subnetwork_uri project = var.project_id diff --git a/modules/sap_nw_ha/outputs.tf b/modules/sap_nw_ha/outputs.tf index 5058da33..2685aa73 100644 --- a/modules/sap_nw_ha/outputs.tf +++ b/modules/sap_nw_ha/outputs.tf @@ -23,25 +23,25 @@ output "ers_instance" { } output "nw_vips" { description = "NW virtual IPs" - value = google_compute_address.nw_vips.*.self_link + value = google_compute_address.nw_vips[*].self_link } output "nw_instance_groups" { description = "NW Instance Groups" - value = google_compute_instance_group.nw_instance_groups.*.self_link + value = google_compute_instance_group.nw_instance_groups[*].self_link } output "nw_hc" { description = "Health Checks" - value = google_compute_health_check.nw_hc.*.self_link + value = google_compute_health_check.nw_hc[*].self_link } output "nw_hc_firewall" { description = "Firewall rule for the Health Checks" - value = google_compute_firewall.nw_hc_firewall_rule.*.self_link + value = google_compute_firewall.nw_hc_firewall_rule[*].self_link } output "nw_regional_backend_services" { description = "Backend Services" - value = google_compute_region_backend_service.nw_regional_backend_services.*.self_link + value = google_compute_region_backend_service.nw_regional_backend_services[*].self_link } output "nw_forwarding_rules" { description = "Forwarding rules" - value = google_compute_forwarding_rule.nw_forwarding_rules.*.self_link + value = google_compute_forwarding_rule.nw_forwarding_rules[*].self_link } diff --git a/modules/sap_nw_ha/variables.tf b/modules/sap_nw_ha/variables.tf index 6f94c754..49680157 100644 --- a/modules/sap_nw_ha/variables.tf +++ b/modules/sap_nw_ha/variables.tf @@ -322,12 +322,12 @@ variable "secondary_reservation_name" { # variable "primary_startup_url" { type = string - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_nw_ha/startup_scs.sh | bash -s 
https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw_ha/startup_scs.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } variable "secondary_startup_url" { type = string - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform/sap_nw_ha/startup_ers.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202402230649/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw_ha/startup_ers.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } variable "post_deployment_script" { diff --git a/modules/sap_nw_ha/versions.tf b/modules/sap_nw_ha/versions.tf index da31d1ff..5afb4ea1 100644 --- a/modules/sap_nw_ha/versions.tf +++ b/modules/sap_nw_ha/versions.tf @@ -16,6 +16,9 @@ terraform { required_version = ">=0.12.6" required_providers { - google = {} + google = { + source = "hashicorp/google" + version = ">= 4.0.0, < 6" + } } } \ No newline at end of file From b98785827142a874c5ed76dc92a70b1cf6f02462 Mon Sep 17 00:00:00 2001 From: Google SAP Deployments Dev Date: Thu, 11 Apr 2024 18:29:45 +0000 Subject: [PATCH 3/8] Project import generated by Copybara. GitOrigin-RevId: f34e0227e9b1af54531a09ae635dddcccbbb4d0d --- modules/sap_hana/README.md | 120 ++++++++++++++++------------ modules/sap_hana_ha/README.md | 111 +++++++++++-------------- modules/sap_hana_scaleout/README.md | 84 ++++++------------- modules/sap_nw/README.md | 66 +++++++++++++++ modules/sap_nw_ha/README.md | 90 +++++++++++++++++++++ 5 files changed, 298 insertions(+), 173 deletions(-) create mode 100644 modules/sap_nw/README.md create mode 100644 modules/sap_nw_ha/README.md diff --git a/modules/sap_hana/README.md b/modules/sap_hana/README.md index 00bab516..dd1fccab 100644 --- a/modules/sap_hana/README.md +++ b/modules/sap_hana/README.md @@ -1,63 +1,110 @@ -# Terraform for SAP HANA for Google Cloud +# Terraform for SAP NW HA for Google Cloud This template follows the documented steps https://cloud.google.com/solutions/sap/docs/certifications-sap-hana and deploys GCP and Pacemaker resources up to the installation of SAP's central services. -## Usage +## Set up Terraform -Basic usage of this module is as follows: +Install Terraform on the machine you would like to use to deploy from by +following +https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform -```hcl -module "sap_hana" { - source = "terraform-google-modules/sap/google//modules/sap_hana" - version = "~> 1.1" +## How to deploy - project_id = "PROJECT_ID" # example: my-project-x - zone = "ZONE" # example: us-east1-b - machine_type = "MACHINE_TYPE" # example: n1-highmem-32 - subnetwork = "SUBNETWORK" # example: default - linux_image = "LINUX_IMAGE" # example: rhel-8-4-sap-ha - linux_image_project = "LINUX_IMAGE_PROJECT" # example: rhel-sap-cloud - instance_name = "VM_NAME" # example: hana_instance - sap_hana_sid = "SID" # example: ABC, Must conform to [a-zA-Z][a-zA-Z0-9]{2} -} -``` +1. 
Download .tf file into an empty directory `curl + https://storage.googleapis.com/cloudsapdeploy/deploymentmanager/latest/dm-templates/sap_nw_ha/terraform/sap_nw_ha.tf + -o sap_nw_ha.tf` -Functional example is included in the -[examples](../../examples/sap_hana_simple) directory. +2. Fill in mandatory variables and if the desired optional variable in the .tf + file. + +3. Deploy + + 1. Run `terraform init` (only needed once) + 2. Run `terraform plan` to see what is going to be deployed. Verify if + names, zones, sizes, etc. are as desired. + 3. Run `terrafom apply` to deploy the resources + 4. Run `terrafom destroy` to remove the resources + +4. Continue installation of SAP software and setup of remaining cluster + resources as per documentation at + https://cloud.google.com/solutions/sap/docs/sap-hana-deployment-guide + +## Additional information + +For additional information see https://www.terraform.io/docs/index.html and +https://cloud.google.com/docs/terraform ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| backup\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| backup\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| backup\_disk\_type | Optional - The default is hyperdisk-balanced for native bare metal machines and pd-balanced otherwise, only used if a backup disk is needed. | `string` | `""` | no | | can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no | +| data\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| data\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the data disk. | `string` | `""` | no | +| disk\_type | Optional - The default disk type to use for disk(s) containing log and data volumes. The default is hyperdisk-extreme for native bare metal machines and pd-ssd otherwise. Not all disk are supported on all machine types - see https://cloud.google.com/compute/docs/disks/ for details. | `string` | `""` | no | +| enable\_fast\_restart | Optional - The default is true. If set enables HANA Fast Restart. | `bool` | `true` | no | +| hyperdisk\_balanced\_iops\_default | Optional - default is 3000. 
Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks). | `number` | `3000` | no | +| hyperdisk\_balanced\_throughput\_default | Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks). | `number` | `750` | no | +| include\_backup\_disk | Optional - The default is true. If set creates a disk for backups. | `bool` | `true` | no | | instance\_name | Hostname of the GCE instance. | `string` | n/a | yes | | linux\_image | Linux image name to use. | `string` | n/a | yes | | linux\_image\_project | The project which the Linux image belongs to. | `string` | n/a | yes | +| log\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| log\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the log disk. | `string` | `""` | no | | machine\_type | Machine type for the instances. | `string` | n/a | yes | | network\_tags | OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes. | `list(string)` | `[]` | no | +| nic\_type | Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO\_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking. | `string` | `""` | no | | post\_deployment\_script | OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment. | `string` | `""` | no | -| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s BUILD.TERRA_SH_URL/sap_hana/startup.sh | bash -s BUILD.TERRA_SH_URL"` | no | +| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | | project\_id | Project id where the instances will be created. | `string` | n/a | yes | | public\_ip | OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail. | `bool` | `true` | no | | reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no |
| sap\_deployment\_debug | OPTIONAL - If this value is set to true, the deployment will generate verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging. | `bool` | `false` | no |
| sap\_hana\_backup\_nfs | NFS endpoint for /hanabackup storage. | `string` | `""` | no |
| sap\_hana\_backup\_nfs\_resource | NFS resource to be used as the backup drive instead of a disk. This and sap\_hana\_backup\_nfs may not both be set. |
object({
networks = list(
object({
ip_addresses = list(string)
})
)
file_shares = list(
object({
name = string
})
)

})
| `null` | no |
| sap\_hana\_backup\_size | Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory. If sap\_hana\_backup\_nfs is set, this setting is ignored. | `number` | `0` | no |
| sap\_hana\_deployment\_bucket | The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no |
| sap\_hana\_instance\_number | The SAP instance number. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `number` | `0` | no |
| sap\_hana\_sapsys\_gid | The Linux GID of the SAPSYS group. By default this is set to 79. | `number` | `79` | no |
| sap\_hana\_scaleout\_nodes | Number of additional nodes to add. E.g., if you wish for a 4-node cluster you would specify 3 here. | `number` | `0` | no |
| sap\_hana\_shared\_nfs | NFS endpoint for /hana/shared storage. | `string` | `""` | no |
| sap\_hana\_shared\_nfs\_resource | NFS resource to be used as the shared drive instead of a disk. This and sap\_hana\_shared\_nfs may not both be set. |
object({
networks = list(
object({
ip_addresses = list(string)
})
)
file_shares = list(
object({
name = string
})
)

})
| `null` | no | | sap\_hana\_sid | The SAP HANA SID. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. SID must adhere to SAP standard (Three letters or numbers and start with a letter) | `string` | n/a | yes | | sap\_hana\_sidadm\_password | The linux sidadm login password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters. | `string` | `""` | no | | sap\_hana\_sidadm\_password\_secret | The secret key used to retrieve the linux sidadm login from Secret Manager (https://cloud.google.com/secret-manager). The Secret Manager password will overwrite the clear text password from sap\_hana\_sidadm\_password if both are set. | `string` | `""` | no | | sap\_hana\_sidadm\_uid | The Linux UID of the adm user. By default this is set to 900 to avoid conflicting with other OS users. | `number` | `900` | no | | sap\_hana\_system\_password | The SAP HANA SYSTEM password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters with at least 1 number. | `string` | `""` | no | | sap\_hana\_system\_password\_secret | The secret key used to retrieve the SAP HANA SYSTEM login from Secret Manager (https://cloud.google.com/secret-manager). The Secret Manager password will overwrite the clear text password from sap\_hana\_system\_password if both are set. | `string` | `""` | no | -| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s BUILD.TERRA_SH_URL/sap_hana/startup_secondary.sh | bash -s BUILD.TERRA_SH_URL"` | no | +| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | | service\_account | OPTIONAL - Ability to define a custom service account instead of using the default project service account. | `string` | `""` | no | +| shared\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| shared\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine\_type. | `number` | `null` | no | +| shared\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| shared\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the shared disk. | `string` | `""` | no | | subnetwork | The sub network to deploy the instance in. | `string` | n/a | yes | +| unified\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| unified\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. 
Overrides the default size for the primary's unified disk, that is based off of the machine\_type. | `number` | `null` | no | +| unified\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| unified\_worker\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| unified\_worker\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the unified worker disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| unified\_worker\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| use\_single\_shared\_data\_log\_disk | Optional - By default three separate disk for data, logs, and shared will be made. If set to true, one disk will be used instead. | `bool` | `false` | no | +| usrsap\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| usrsap\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| usrsap\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| usrsap\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the /usr/sap disk. | `string` | `""` | no | +| vm\_static\_ip | Optional - Defines an internal static IP for the VM. | `string` | `""` | no | +| worker\_static\_ips | Optional - Defines internal static IP addresses for the worker nodes. | `list(string)` | `[]` | no | | zone | Zone where the instances will be created. | `string` | n/a | yes | ## Outputs @@ -67,29 +114,4 @@ Functional example is included in the | sap\_hana\_primary\_self\_link | SAP HANA self-link for the primary instance created | | sap\_hana\_worker\_self\_links | SAP HANA self-links for the secondary instances created | - - -## Requirements - -These sections describe requirements for using this module. - -### Software - -The following dependencies must be available: - -- [Terraform][terraform] v0.13 -- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 - -## Contributing - -Refer to the [contribution guidelines](./CONTRIBUTING.md) for -information on contributing to this module. 
- -[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google -[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google -[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html -[terraform]: https://www.terraform.io/downloads.html - -## Security Disclosures - -Please see our [security disclosure process](./SECURITY.md). + \ No newline at end of file diff --git a/modules/sap_hana_ha/README.md b/modules/sap_hana_ha/README.md index 8bedeab5..959de4fb 100644 --- a/modules/sap_hana_ha/README.md +++ b/modules/sap_hana_ha/README.md @@ -1,65 +1,54 @@ -# terraform-google-sap for SAP HANA HA - -This module is meant to create SAP HANA HA instance(s) for Google Cloud - -The resources/services/activations/deletions that this module will create/trigger are: - -- A set of compute engine instances, primary and secondary (if specified) -- A set of compute disks -- IP addresses for the instances to use -- Primary and secondary (if specified) GCE instance groups - -## Usage - -Basic usage of this module is as follows: - -```hcl -module "sap_hana_ha" { - source = "terraform-google-modules/sap/google//modules/sap_hana_ha" - version = "~> 1.1" - - project_id = "PROJECT_ID" # example: my-project-x - machine_type = "MACHINE_TYPE" # example: n1-highmem-32 - network = "NETWORK" # example: default - subnetwork = "SUBNETWORK" # example: default - linux_image = "LINUX_IMAGE" # example: rhel-8-4-sap-ha - linux_image_project = "LINUX_IMAGE_PROJECT" # example: rhel-sap-cloud - primary_instance_name = "PRIMARY_NAME" # example: hana-ha-primary - primary_zone = "PRIMARY_ZONE" # example: us-east1-b, must be in the same region as secondary_zone - secondary_instance_name = "SECONDARY_NAME" # example: hana-ha-secondary - secondary_zone = "SECONDARY_ZONE" # example: us-east1-c, must be in the same region as primary_zone -} -``` - -Functional example is included in the -[examples](../../examples/sap_hana_ha_simple) directory. - +TODO add README info ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| backup\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| backup\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| backup\_disk\_type | Optional - The default is hyperdisk-balanced for native bare metal machines and pd-balanced otherwise, only used if a backup disk is needed. | `string` | `""` | no | | can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no | +| data\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine\_type. 
| `number` | `null` | no |
| data\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
| data\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the data disk. | `string` | `""` | no |
| disk\_type | Optional - The default disk type to use for disk(s) containing log and data volumes. The default is hyperdisk-extreme for native bare metal machines and pd-ssd otherwise. Not all disks are supported on all machine types - see https://cloud.google.com/compute/docs/disks/ for details. | `string` | `""` | no |
| enable\_fast\_restart | Optional - The default is true. If set, enables HANA Fast Restart. | `bool` | `true` | no |
| hyperdisk\_balanced\_iops\_default | Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks). | `number` | `3000` | no |
| hyperdisk\_balanced\_throughput\_default | Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks). | `number` | `750` | no |
| include\_backup\_disk | Optional - The default is true. If set, creates a disk for backups. | `bool` | `true` | no |
| is\_work\_load\_management\_deployment | If set, the necessary tags and labels will be added to resources to support WLM. | `bool` | `false` | no |
| linux\_image | Linux image name to use. | `string` | n/a | yes |
| linux\_image\_project | The project which the Linux image belongs to. | `string` | n/a | yes |
| loadbalancer\_name | OPTIONAL - Name of the load balancer that will be created. If left blank with use\_ilb\_vip set to true, then will use lb-SID as default. | `string` | `""` | no |
| log\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
| log\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine\_type. | `number` | `null` | no |
| log\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
| log\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the log disk. | `string` | `""` | no |
| machine\_type | Machine type for the instances. | `string` | n/a | yes |
| majority\_maker\_instance\_name | Optional - Name to use for the Majority Maker instance. Must be provided if scaleout\_nodes > 0. | `string` | `""` | no |
| majority\_maker\_machine\_type | Optional - The machine type to use for the Majority Maker instance. Must be provided if scaleout\_nodes > 0.
| `string` | `""` | no | +| majority\_maker\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_majority_maker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| majority\_maker\_zone | Optional - The zone in which the Majority Maker instance will be deployed. Must be provided if scaleout\_nodes > 0. It is recommended for this to be different from the zones the primary and secondary instance are deployed in. | `string` | `""` | no | | network | Network in which the ILB resides including resources like firewall rules. | `string` | n/a | yes | | network\_tags | OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes. | `list(string)` | `[]` | no | +| nic\_type | Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO\_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking. | `string` | `""` | no | | post\_deployment\_script | OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment. | `string` | `""` | no | | primary\_instance\_group\_name | OPTIONAL - Unmanaged instance group to be created for the primary node. If blank, will use ig-VM\_NAME | `string` | `""` | no | | primary\_instance\_name | Hostname of the primary GCE instance. | `string` | n/a | yes | | primary\_reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | -| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform"` | no | +| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| primary\_static\_ip | Optional - Defines an internal static IP for the primary VM. | `string` | `""` | no | +| primary\_worker\_static\_ips | Optional - Defines internal static IP addresses for the primary worker nodes. | `list(string)` | `[]` | no | | primary\_zone | Zone where the primary instances will be created. | `string` | n/a | yes | | project\_id | Project id where the instances will be created. | `string` | n/a | yes | | public\_ip | OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail. | `bool` | `true` | no | | sap\_deployment\_debug | OPTIONAL - If this value is set to true, the deployment will generates verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging. | `bool` | `false` | no | | sap\_hana\_backup\_size | Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory. | `number` | `0` | no | -| sap\_hana\_deployment\_bucket | The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | +| sap\_hana\_deployment\_bucket | The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | | sap\_hana\_instance\_number | The SAP instance number. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `number` | `0` | no | | sap\_hana\_sapsys\_gid | The Linux GID of the SAPSYS group. By default this is set to 79 | `number` | `79` | no | +| sap\_hana\_scaleout\_nodes | Optional - Specify to add scaleout nodes to both HA instances. | `number` | `0` | no | | sap\_hana\_sid | The SAP HANA SID. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. SID must adhere to SAP standard (Three letters or numbers and start with a letter) | `string` | `""` | no | | sap\_hana\_sidadm\_password | The linux sidadm login password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters. | `string` | `""` | no | | sap\_hana\_sidadm\_password\_secret | The secret key used to retrieve the linux sidadm login from Secret Manager (https://cloud.google.com/secret-manager). The Secret Manager password will overwrite the clear text password from sap\_hana\_sidadm\_password if both are set. 
| `string` | `""` | no | @@ -70,11 +59,26 @@ Functional example is included in the | secondary\_instance\_group\_name | OPTIONAL - Unmanaged instance group to be created for the secondary node. If blank, will use ig-VM\_NAME | `string` | `""` | no | | secondary\_instance\_name | Hostname of the secondary GCE instance. | `string` | n/a | yes | | secondary\_reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | -| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform"` | no | +| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| secondary\_static\_ip | Optional - Defines an internal static IP for the secondary VM. | `string` | `""` | no | +| secondary\_worker\_static\_ips | Optional - Defines internal static IP addresses for the secondary worker nodes. | `list(string)` | `[]` | no | | secondary\_zone | Zone where the secondary instances will be created. | `string` | n/a | yes | | service\_account | OPTIONAL - Ability to define a custom service account instead of using the default project service account. | `string` | `""` | no | +| shared\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| shared\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine\_type. | `number` | `null` | no | +| shared\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| shared\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the shared disk. | `string` | `""` | no | | subnetwork | The sub network to deploy the instance in. | `string` | n/a | yes | +| unified\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| unified\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| unified\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| use\_single\_shared\_data\_log\_disk | Optional - By default three separate disk for data, logs, and shared will be made. If set to true, one disk will be used instead. | `bool` | `false` | no | +| usrsap\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it. 
| `number` | `null` | no | +| usrsap\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| usrsap\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| usrsap\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the /usr/sap disk. | `string` | `""` | no | | wlm\_deployment\_name | Deployment name to be used for integrating into Work Load Management. | `string` | `""` | no | +| worker\_startup\_url | Startup script to be executed when the worker VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_worker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | ## Outputs @@ -83,31 +87,8 @@ Functional example is included in the | sap\_hana\_ha\_firewall\_link | Link to the optional fire wall | | sap\_hana\_ha\_loadbalander\_link | Link to the optional load balancer | | sap\_hana\_ha\_primary\_instance\_self\_link | Self-link for the primary SAP HANA HA instance created. | +| sap\_hana\_ha\_primary\_worker\_self\_links | Self-link for the worker nodes in the primary SAP HANA HA instance. | | sap\_hana\_ha\_secondary\_instance\_self\_link | Self-link for the secondary SAP HANA HA instance created. | +| sap\_hana\_ha\_secondary\_worker\_self\_links | Self-link for the worker nodes in the secondary SAP HANA HA instance. | - - -## Requirements - -These sections describe requirements for using this module. - -### Software - -The following dependencies must be available: - -- [Terraform][terraform] v0.13 -- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 - -## Contributing - -Refer to the [contribution guidelines](./CONTRIBUTING.md) for -information on contributing to this module. - -[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google -[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google -[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html -[terraform]: https://www.terraform.io/downloads.html - -## Security Disclosures - -Please see our [security disclosure process](./SECURITY.md). + \ No newline at end of file diff --git a/modules/sap_hana_scaleout/README.md b/modules/sap_hana_scaleout/README.md index 59be3110..bdad16a0 100644 --- a/modules/sap_hana_scaleout/README.md +++ b/modules/sap_hana_scaleout/README.md @@ -1,53 +1,37 @@ -# terraform-google-sap for SAP HANA Scaleout - -This template follows the documented steps -https://cloud.google.com/solutions/sap/docs/sap-hana-ha-scaleout-tf-deployment-guide and deploys an SAP HANA scale-out system that includes the SAP HANA host auto-failover fault-recovery solution. 
- -## Usage - -Basic usage of this module is as follows: - -```hcl -module "hana_scaleout" { - source = "terraform-google-modules/sap/google//modules/sap_hana_scaleout" - version = "~> 1.1" - - project_id = "PROJECT_ID" # example: my-project-x - zone = "ZONE" # example: us-east1-b - machine_type = "MACHINE_TYPE" # example: n1-highmem-32 - subnetwork = "SUBNETWORK" # example: default - linux_image = "LINUX_IMAGE" # example: rhel-8-4-sap-ha - linux_image_project = "LINUX_IMAGE_PROJECT" # example: rhel-sap-cloud - instance_name = "VM_NAME" # example: hana-instance - sap_hana_sid = "SID" # example: ABC, Must conform to [a-zA-Z][a-zA-Z0-9]{2} - sap_hana_shared_nfs = "HANA_SHARED_NFS" # example: 10.10.10.10:/shared - sap_hana_backup_nfs = "HANA_BACKUP_NFS" # example: 10.10.10.10:/backup -} -``` - -Functional example is included in the -[examples](../../examples/sap_hana_scaleout) directory. - +TODO add README info ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no | +| data\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| data\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the data disk. | `string` | `""` | no | +| disk\_type | Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details. | `string` | `"pd-ssd"` | no | +| hyperdisk\_balanced\_iops\_default | Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot disk). | `number` | `3000` | no | +| hyperdisk\_balanced\_throughput\_default | Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot disk). | `number` | `750` | no | | instance\_name | Hostname of the GCE instance. | `string` | n/a | yes | | linux\_image | Linux image name to use. | `string` | n/a | yes | | linux\_image\_project | The project which the Linux image belongs to. | `string` | n/a | yes | +| log\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine\_type. 
| `number` | `null` | no | +| log\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the log disk. | `string` | `""` | no | | machine\_type | Machine type for the instances. | `string` | n/a | yes | | network\_tags | OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes. | `list(string)` | `[]` | no | +| nic\_type | Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO\_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking. | `string` | `""` | no | | post\_deployment\_script | OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment. | `string` | `""` | no | -| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s BUILD.TERRA_SH_URL/sap_hana_scaleout/startup.sh | bash -s BUILD.TERRA_SH_URL"` | no | +| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_scaleout/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | | project\_id | Project id where the instances will be created. | `string` | n/a | yes | | public\_ip | OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail. | `bool` | `true` | no | | reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | | sap\_deployment\_debug | OPTIONAL - If this value is set to true, the deployment will generates verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging. | `bool` | `false` | no | | sap\_hana\_backup\_nfs | Google Filestore share for /hanabackup | `string` | n/a | yes | -| sap\_hana\_deployment\_bucket | The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | +| sap\_hana\_deployment\_bucket | The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | | sap\_hana\_instance\_number | The SAP instance number. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `number` | `0` | no | +| sap\_hana\_sapsys\_gid | The Linux GID of the SAPSYS group. By default this is set to 79 | `number` | `79` | no | | sap\_hana\_shared\_nfs | Google Filestore share for /hana/shared | `string` | n/a | yes | | sap\_hana\_sid | The SAP HANA SID. SID must adhere to SAP standard (Three letters or numbers and start with a letter) | `string` | n/a | yes | | sap\_hana\_sidadm\_password | The linux sidadm login password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters. | `string` | `""` | no | @@ -57,9 +41,16 @@ Functional example is included in the | sap\_hana\_system\_password | The SAP HANA SYSTEM password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters with at least 1 number. | `string` | `""` | no | | sap\_hana\_system\_password\_secret | The secret key used to retrieve the SAP HANA SYSTEM login from Secret Manager (https://cloud.google.com/secret-manager). The Secret Manager password will overwrite the clear text password from sap\_hana\_system\_password if both are set. | `string` | `""` | no | | sap\_hana\_worker\_nodes | Number of worker nodes to create.
This is in addition to the primary node. | `number` | `1` | no | -| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s BUILD.TERRA_SH_URL/sap_hana_scaleout/startup_secondary.sh | bash -s BUILD.TERRA_SH_URL"` | no | +| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_scaleout/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | | service\_account | OPTIONAL - Ability to define a custom service account instead of using the default project service account. | `string` | `""` | no | +| standby\_static\_ips | Optional - Defines internal static IP addresses for the standby nodes. | `list(string)` | `[]` | no | | subnetwork | The sub network to deploy the instance in. | `string` | n/a | yes | +| unified\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| unified\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary's unified disk, that is based off of the machine\_type. | `number` | `null` | no | +| unified\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| use\_single\_data\_log\_disk | Optional - By default two separate disk for data and logs will be made. If set to true, one disk will be used instead. | `bool` | `false` | no | +| vm\_static\_ip | Optional - Defines an internal static IP for the VM. | `string` | `""` | no | +| worker\_static\_ips | Optional - Defines internal static IP addresses for the worker nodes. | `list(string)` | `[]` | no | | zone | Zone where the instances will be created. | `string` | n/a | yes | ## Outputs @@ -70,29 +61,4 @@ Functional example is included in the | hana\_scaleout\_worker\_self\_links | List of self-links for the hana scaleout workers created | | sap\_hana\_primary\_self\_link | Self-link for the primary SAP HANA Scalout instance created. | - - -## Requirements - -These sections describe requirements for using this module. - -### Software - -The following dependencies must be available: - -- [Terraform][terraform] v0.13 -- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 - -## Contributing - -Refer to the [contribution guidelines](./CONTRIBUTING.md) for -information on contributing to this module. - -[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google -[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google -[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html -[terraform]: https://www.terraform.io/downloads.html - -## Security Disclosures - -Please see our [security disclosure process](./SECURITY.md). 
+ \ No newline at end of file diff --git a/modules/sap_nw/README.md b/modules/sap_nw/README.md new file mode 100644 index 00000000..63a078d4 --- /dev/null +++ b/modules/sap_nw/README.md @@ -0,0 +1,66 @@ +# Terraform for SAP NW for Google Cloud + +This template follows the documented steps + +#TODO (b/194714290): Update this when the new documentation is published + +and deploys GCP resources up to the installation of SAP's central services. + +## Set up Terraform + +Install Terraform on the machine you would like to use to deploy from by +following +https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform + +## How to deploy + +1. Download .tf file into an empty directory + + # TODO: ADD link + +2. Fill in mandatory variables and if the desired optional variables in the .tf + file. + +3. Deploy + + 1. Run `terraform init` (only needed once) + 2. Run `terraform plan` to see what is going to be deployed. Verify if + names, zones, sizes, etc. are as desired. + 3. Run `terrafom apply` to deploy the resources + +## Additional information + +For additional information see https://www.terraform.io/docs/index.html and +https://cloud.google.com/docs/terraform + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no | +| instance\_name | Hostname of the GCE instance. | `string` | n/a | yes | +| linux\_image | Linux image name to use. | `string` | n/a | yes | +| linux\_image\_project | The project which the Linux image belongs to. | `string` | n/a | yes | +| machine\_type | Machine type for the instances. | `string` | n/a | yes | +| network\_tags | OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes. | `list(string)` | `[]` | no | +| post\_deployment\_script | OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment. | `string` | `""` | no | +| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| project\_id | Project id where the instances will be created. | `string` | n/a | yes | +| public\_ip | OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail. | `bool` | `true` | no | +| reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | +| sap\_deployment\_debug | OPTIONAL - If this value is set to true, the deployment will generates verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging. | `bool` | `false` | no | +| sap\_mnt\_size | Size of /sapmnt in GB | `number` | `8` | no | +| service\_account | OPTIONAL - Ability to define a custom service account instead of using the default project service account. | `string` | `""` | no | +| subnetwork | The sub network to deploy the instance in. | `string` | n/a | yes | +| swap\_size | Size in GB of swap volume | `number` | `8` | no | +| usr\_sap\_size | Size of /usr/sap in GB | `number` | `8` | no | +| zone | Zone where the instances will be created. | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| sap\_nw\_self\_link | SAP NW self-link for instance created | + + \ No newline at end of file diff --git a/modules/sap_nw_ha/README.md b/modules/sap_nw_ha/README.md new file mode 100644 index 00000000..557149a9 --- /dev/null +++ b/modules/sap_nw_ha/README.md @@ -0,0 +1,90 @@ +# Terraform for SAP NW HA for Google Cloud +This template follows the documented steps https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles and deploys GCP and Pacemaker resources up to the installation of SAP's central services. + +## Set up Terraform + +Install Terraform on the machine you would like to use to deploy from by following https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform + +## How to deploy + +1. Download .tf file into an empty directory +`curl https://storage.googleapis.com/cloudsapdeploy/deploymentmanager/latest/dm-templates/sap_nw_ha/terraform/sap_nw_ha.tf -o sap_nw_ha.tf` + +2. Fill in mandatory variables and if the desired optional variable in the .tf file. + +3. Deploy + 1. Run `terraform init` (only needed once) + 2. Run `terraform plan` to see what is going to be deployed. Verify if names, zones, sizes, etc. are as desired. + 3. Run `terrafom apply` to deploy the resources + 4. Run `terrafom destroy` to remove the resources + +4. Continue installation of SAP software and setup of remaining cluster resources as per documentation at https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles#install_scs_and_ers + +## Additional information + +For additional information see https://www.terraform.io/docs/index.html and https://cloud.google.com/docs/terraform + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. 
| `bool` | `true` | no | +| ers\_backend\_svc\_name | Name of ERS backend service | `string` | `""` | no | +| ers\_forw\_rule\_name | Name of ERS forwarding rule | `string` | `""` | no | +| ers\_hc\_name | Name of ERS health check | `string` | `""` | no | +| ers\_hc\_port | Port of ERS health check | `string` | `""` | no | +| ers\_inst\_group\_name | Name of ERS instance group | `string` | `""` | no | +| ers\_vip\_address | Address of ERS virtual IP | `string` | `""` | no | +| ers\_vip\_name | Name of ERS virtual IP | `string` | `""` | no | +| hc\_firewall\_rule\_name | Name of firewall rule for the health check | `string` | `""` | no | +| hc\_network\_tag | Network tag for the health check firewall rule | `list(string)` | `[]` | no | +| linux\_image | Linux image name | `string` | n/a | yes | +| linux\_image\_project | Linux image project | `string` | n/a | yes | +| machine\_type | Machine type for the instances | `string` | n/a | yes | +| network | Network for the instances | `string` | n/a | yes | +| network\_tags | Network tags to apply to the instances | `list(string)` | `[]` | no | +| nfs\_path | NFS path for shared file system, e.g. 10.163.58.114:/ssd | `string` | n/a | yes | +| pacemaker\_cluster\_name | Name of Pacemaker cluster. | `string` | `""` | no | +| post\_deployment\_script | Specifies the location of a script to run after the deployment is complete.
The script should be hosted on a web server or in a GCS bucket. The URL should
begin with http://, https://, or gs://. Note that this script will be executed
on all VMs that the template creates. If you only want to run it on the master
instance, you will need to add a check at the top of your script. | `string` | `""` | no | +| primary\_reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | +| primary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw_ha/startup_scs.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| project\_id | Project id where the instances will be created | `string` | n/a | yes | +| public\_ip | Create an ephemeral public ip for the instances | `bool` | `false` | no | +| sap\_deployment\_debug | Debug log level for deployment | `bool` | `false` | no | +| sap\_ers\_instance\_number | ERS instance number | `string` | `"10"` | no | +| sap\_mnt\_size | Size of /sapmnt in GB | `number` | `8` | no | +| sap\_nw\_abap | Is this a Netweaver ABAP installation. Set 'false' for NW Java. Dual stack is not supported by this script. | `bool` | `true` | no | +| sap\_primary\_instance | Name of first instance (initial SCS location) | `string` | n/a | yes | +| sap\_primary\_zone | Zone where the first instance will be created | `string` | n/a | yes | +| sap\_scs\_instance\_number | SCS instance number | `string` | `"00"` | no | +| sap\_secondary\_instance | Name of second instance (initial ERS location) | `string` | n/a | yes | +| sap\_secondary\_zone | Zone where the second instance will be created | `string` | n/a | yes | +| sap\_sid | SAP System ID | `string` | n/a | yes | +| scs\_backend\_svc\_name | Name of SCS backend service | `string` | `""` | no | +| scs\_forw\_rule\_name | Name of SCS forwarding rule | `string` | `""` | no | +| scs\_hc\_name | Name of SCS health check | `string` | `""` | no | +| scs\_hc\_port | Port of SCS health check | `string` | `""` | no | +| scs\_inst\_group\_name | Name of SCS instance group | `string` | `""` | no | +| scs\_vip\_address | Address of SCS virtual IP | `string` | `""` | no | +| scs\_vip\_name | Name of SCS virtual IP | `string` | `""` | no | +| secondary\_reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | +| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw_ha/startup_ers.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| service\_account | Service account that will be used as the service account on the created instance.
Leave this blank to use the project default service account | `string` | `""` | no | +| subnetwork | Subnetwork for the instances | `string` | n/a | yes | +| swap\_size | Size in GB of swap volume | `number` | `8` | no | +| usr\_sap\_size | Size of /usr/sap in GB | `number` | `8` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ers\_instance | ERS instance | +| nw\_forwarding\_rules | Forwarding rules | +| nw\_hc | Health Checks | +| nw\_hc\_firewall | Firewall rule for the Health Checks | +| nw\_instance\_groups | NW Instance Groups | +| nw\_regional\_backend\_services | Backend Services | +| nw\_vips | NW virtual IPs | +| scs\_instance | SCS instance | + + \ No newline at end of file From 50c8e3cf6f612f06255f3c0e2901cbf7dda97f6d Mon Sep 17 00:00:00 2001 From: Google SAP Deployments Dev Date: Fri, 12 Apr 2024 13:43:13 +0000 Subject: [PATCH 4/8] Project import generated by Copybara. GitOrigin-RevId: 1d264d8c8ddfb1294991e909ed32e0861ae35d12 --- modules/sap_hana/README.md | 35 ++++++++++++--- modules/sap_hana/main.tf | 2 +- modules/sap_hana/variables.tf | 2 +- modules/sap_hana_ha/README.md | 66 ++++++++++++++++++++++++++++- modules/sap_hana_ha/main.tf | 2 +- modules/sap_hana_scaleout/README.md | 66 ++++++++++++++++++++++++++++- modules/sap_hana_scaleout/main.tf | 2 +- modules/sap_nw/README.md | 34 ++++++--------- modules/sap_nw/main.tf | 2 +- modules/sap_nw_ha/README.md | 32 ++++++++++++-- modules/sap_nw_ha/main.tf | 2 +- modules/sap_nw_ha/versions.tf | 2 +- 12 files changed, 207 insertions(+), 40 deletions(-) diff --git a/modules/sap_hana/README.md b/modules/sap_hana/README.md index dd1fccab..3423d12b 100644 --- a/modules/sap_hana/README.md +++ b/modules/sap_hana/README.md @@ -1,4 +1,4 @@ -# Terraform for SAP NW HA for Google Cloud +# Terraform for SAP HANA for Google Cloud This template follows the documented steps https://cloud.google.com/solutions/sap/docs/certifications-sap-hana and deploys @@ -13,8 +13,8 @@ https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get ## How to deploy 1. Download .tf file into an empty directory `curl - https://storage.googleapis.com/cloudsapdeploy/deploymentmanager/latest/dm-templates/sap_nw_ha/terraform/sap_nw_ha.tf - -o sap_nw_ha.tf` + https://storage.googleapis.com/cloudsapdeploy/terraform/latest/terraform/sap_hana/terraform/sap_hana.tf + -o sap_hana.tf` 2. Fill in mandatory variables and if the desired optional variable in the .tf file. @@ -29,7 +29,7 @@ https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get 4. Continue installation of SAP software and setup of remaining cluster resources as per documentation at - https://cloud.google.com/solutions/sap/docs/sap-hana-deployment-guide + https://cloud.google.com/solutions/sap/docs/sap-hana-deployment-guide-tf ## Additional information @@ -114,4 +114,29 @@ https://cloud.google.com/docs/terraform | sap\_hana\_primary\_self\_link | SAP HANA self-link for the primary instance created | | sap\_hana\_worker\_self\_links | SAP HANA self-links for the secondary instances created | - \ No newline at end of file + + +## Requirements + +These sections describe requirements for using this module. + +### Software + +The following dependencies must be available: + +- [Terraform][terraform] v0.13 +- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 + +## Contributing + +Refer to the [contribution guidelines](./CONTRIBUTING.md) for +information on contributing to this module. 
+ +[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google +[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google +[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html +[terraform]: https://www.terraform.io/downloads.html + +## Security Disclosures + +Please see our [security disclosure process](./SECURITY.md). diff --git a/modules/sap_hana/main.tf b/modules/sap_hana/main.tf index 5b358bee..f636c6ea 100644 --- a/modules/sap_hana/main.tf +++ b/modules/sap_hana/main.tf @@ -17,7 +17,7 @@ # Terraform SAP HANA for Google Cloud # # Version: 2.0.202404101403 -# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 +# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 # ################################################################################ diff --git a/modules/sap_hana/variables.tf b/modules/sap_hana/variables.tf index 2c8ee7fe..f1cd9514 100644 --- a/modules/sap_hana/variables.tf +++ b/modules/sap_hana/variables.tf @@ -496,4 +496,4 @@ variable "can_ip_forward" { type = bool description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed." default = true -} \ No newline at end of file +} diff --git a/modules/sap_hana_ha/README.md b/modules/sap_hana_ha/README.md index 959de4fb..d27aae7e 100644 --- a/modules/sap_hana_ha/README.md +++ b/modules/sap_hana_ha/README.md @@ -1,4 +1,41 @@ -TODO add README info +# Terraform for SAP HANA HA for Google Cloud + +This template follows the documented steps +https://cloud.google.com/solutions/sap/docs/certifications-sap-hana and deploys +GCP and Pacemaker resources up to the installation of SAP's central services. + +## Set up Terraform + +Install Terraform on the machine you would like to use to deploy from by +following +https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform + +## How to deploy + +1. Download .tf file into an empty directory `curl + https://storage.googleapis.com/cloudsapdeploy/terraform/latest/terraform/sap_hana_ha/terraform/sap_hana_ha.tf + -o sap_hana_ha.tf` + +2. Fill in mandatory variables and if the desired optional variable in the .tf + file. + +3. Deploy + + 1. Run `terraform init` (only needed once) + 2. Run `terraform plan` to see what is going to be deployed. Verify if + names, zones, sizes, etc. are as desired. + 3. Run `terrafom apply` to deploy the resources + 4. Run `terrafom destroy` to remove the resources + +4. Continue installation of SAP software and setup of remaining cluster + resources as per documentation at + https://cloud.google.com/solutions/sap/docs/sap-hana-ha-tf-deployment + +## Additional information + +For additional information see https://www.terraform.io/docs/index.html and +https://cloud.google.com/docs/terraform + ## Inputs @@ -91,4 +128,29 @@ TODO add README info | sap\_hana\_ha\_secondary\_instance\_self\_link | Self-link for the secondary SAP HANA HA instance created. | | sap\_hana\_ha\_secondary\_worker\_self\_links | Self-link for the worker nodes in the secondary SAP HANA HA instance. | - \ No newline at end of file + + +## Requirements + +These sections describe requirements for using this module. 
+ +### Software + +The following dependencies must be available: + +- [Terraform][terraform] v0.13 +- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 + +## Contributing + +Refer to the [contribution guidelines](./CONTRIBUTING.md) for +information on contributing to this module. + +[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google +[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google +[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html +[terraform]: https://www.terraform.io/downloads.html + +## Security Disclosures + +Please see our [security disclosure process](./SECURITY.md). diff --git a/modules/sap_hana_ha/main.tf b/modules/sap_hana_ha/main.tf index 0f791809..46199968 100644 --- a/modules/sap_hana_ha/main.tf +++ b/modules/sap_hana_ha/main.tf @@ -17,7 +17,7 @@ # Terraform SAP HANA HA for Google Cloud # # Version: 2.0.202404101403 -# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 +# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 # ################################################################################ diff --git a/modules/sap_hana_scaleout/README.md b/modules/sap_hana_scaleout/README.md index bdad16a0..63852a8f 100644 --- a/modules/sap_hana_scaleout/README.md +++ b/modules/sap_hana_scaleout/README.md @@ -1,4 +1,41 @@ -TODO add README info +# Terraform for SAP HANA Scaleout for Google Cloud + +This template follows the documented steps +https://cloud.google.com/solutions/sap/docs/certifications-sap-hana and deploys +GCP and Pacemaker resources up to the installation of SAP's central services. + +## Set up Terraform + +Install Terraform on the machine you would like to use to deploy from by +following +https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform + +## How to deploy + +1. Download .tf file into an empty directory `curl + https://storage.googleapis.com/cloudsapdeploy/terraform/latest/terraform/sap_hana_scaleout/terraform/sap_hana_scaleout.tf + -o sap_hana_scaleout.tf` + +2. Fill in mandatory variables and if the desired optional variable in the .tf + file. + +3. Deploy + + 1. Run `terraform init` (only needed once) + 2. Run `terraform plan` to see what is going to be deployed. Verify if + names, zones, sizes, etc. are as desired. + 3. Run `terrafom apply` to deploy the resources + 4. Run `terrafom destroy` to remove the resources + +4. Continue installation of SAP software and setup of remaining cluster + resources as per documentation at + https://cloud.google.com/solutions/sap/docs/sap-hana-ha-scaleout-tf-deployment-guide + +## Additional information + +For additional information see https://www.terraform.io/docs/index.html and +https://cloud.google.com/docs/terraform + ## Inputs @@ -61,4 +98,29 @@ TODO add README info | hana\_scaleout\_worker\_self\_links | List of self-links for the hana scaleout workers created | | sap\_hana\_primary\_self\_link | Self-link for the primary SAP HANA Scalout instance created. | - \ No newline at end of file + + +## Requirements + +These sections describe requirements for using this module. + +### Software + +The following dependencies must be available: + +- [Terraform][terraform] v0.13 +- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 + +## Contributing + +Refer to the [contribution guidelines](./CONTRIBUTING.md) for +information on contributing to this module. 
+ +[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google +[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google +[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html +[terraform]: https://www.terraform.io/downloads.html + +## Security Disclosures + +Please see our [security disclosure process](./SECURITY.md). diff --git a/modules/sap_hana_scaleout/main.tf b/modules/sap_hana_scaleout/main.tf index e7b0ed4c..23de33f3 100644 --- a/modules/sap_hana_scaleout/main.tf +++ b/modules/sap_hana_scaleout/main.tf @@ -18,7 +18,7 @@ # # # Version: 2.0.202404101403 -# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 +# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 # ################################################################################ diff --git a/modules/sap_nw/README.md b/modules/sap_nw/README.md index 63a078d4..4d45e608 100644 --- a/modules/sap_nw/README.md +++ b/modules/sap_nw/README.md @@ -1,37 +1,29 @@ # Terraform for SAP NW for Google Cloud - -This template follows the documented steps - -#TODO (b/194714290): Update this when the new documentation is published - -and deploys GCP resources up to the installation of SAP's central services. +This template follows the documented steps https://cloud.google.com/solutions/sap/docs/netweaver-deployment-guide-linux and deploys GCP and Pacemaker resources up to the installation of SAP's central services. ## Set up Terraform -Install Terraform on the machine you would like to use to deploy from by -following -https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform +Install Terraform on the machine you would like to use to deploy from by following https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform ## How to deploy -1. Download .tf file into an empty directory +1. Download .tf file into an empty directory +`curl https://storage.googleapis.com/cloudsapdeploy/terraform/latest/terraform/sap_nw/terraform/sap_nw.tf -o sap_nw.tf` - # TODO: ADD link +2. Fill in mandatory variables and if the desired optional variable in the .tf file. -2. Fill in mandatory variables and if the desired optional variables in the .tf - file. +3. Deploy + 1. Run `terraform init` (only needed once) + 2. Run `terraform plan` to see what is going to be deployed. Verify if names, zones, sizes, etc. are as desired. + 3. Run `terrafom apply` to deploy the resources + 4. Run `terrafom destroy` to remove the resources -3. Deploy - - 1. Run `terraform init` (only needed once) - 2. Run `terraform plan` to see what is going to be deployed. Verify if - names, zones, sizes, etc. are as desired. - 3. Run `terrafom apply` to deploy the resources +4. 
Continue installation of SAP software and setup of remaining cluster resources as per documentation at https://cloud.google.com/solutions/sap/docs/netweaver-deployment-guide-linux ## Additional information -For additional information see https://www.terraform.io/docs/index.html and -https://cloud.google.com/docs/terraform +For additional information see https://www.terraform.io/docs/index.html and https://cloud.google.com/docs/terraform + ## Inputs diff --git a/modules/sap_nw/main.tf b/modules/sap_nw/main.tf index 0432a784..cca42da9 100644 --- a/modules/sap_nw/main.tf +++ b/modules/sap_nw/main.tf @@ -17,7 +17,7 @@ # Terraform SAP NW for Google Cloud # # Version: 2.0.202404101403 -# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 +# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 # ################################################################################ diff --git a/modules/sap_nw_ha/README.md b/modules/sap_nw_ha/README.md index 557149a9..1696ea39 100644 --- a/modules/sap_nw_ha/README.md +++ b/modules/sap_nw_ha/README.md @@ -1,5 +1,5 @@ # Terraform for SAP NW HA for Google Cloud -This template follows the documented steps https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles and deploys GCP and Pacemaker resources up to the installation of SAP's central services. +This template follows the documented steps https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles and https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-rhel and deploys GCP and Pacemaker resources up to the installation of SAP's central services. ## Set up Terraform @@ -18,11 +18,12 @@ Install Terraform on the machine you would like to use to deploy from by followi 3. Run `terrafom apply` to deploy the resources 4. Run `terrafom destroy` to remove the resources -4. Continue installation of SAP software and setup of remaining cluster resources as per documentation at https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles#install_scs_and_ers +4. Continue installation of SAP software and setup of remaining cluster resources as per documentation at https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles or https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-rhel ## Additional information For additional information see https://www.terraform.io/docs/index.html and https://cloud.google.com/docs/terraform + ## Inputs @@ -87,4 +88,29 @@ For additional information see https://www.terraform.io/docs/index.html and http | nw\_vips | NW virtual IPs | | scs\_instance | SCS instance | - \ No newline at end of file + + +## Requirements + +These sections describe requirements for using this module. + +### Software + +The following dependencies must be available: + +- [Terraform][terraform] v0.13 +- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 + +## Contributing + +Refer to the [contribution guidelines](./CONTRIBUTING.md) for +information on contributing to this module. + +[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google +[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google +[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html +[terraform]: https://www.terraform.io/downloads.html + +## Security Disclosures + +Please see our [security disclosure process](./SECURITY.md). 
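For readers who want a concrete starting point, the following is a minimal sketch of a `sap_nw_ha` module invocation that covers the required inputs from the table above. It mirrors the values used in the `examples/sap_nw_ha_simple` example added later in this series; the registry source path, version constraint, zones, instance names, and NFS path are illustrative assumptions rather than fixed requirements.

```hcl
# Minimal sketch of a sap_nw_ha invocation; all values below are illustrative.
module "sap_nw_ha" {
  source  = "terraform-google-modules/sap/google//modules/sap_nw_ha" # assumed registry path
  version = "~> 1.0"                                                 # assumed version constraint

  project_id          = "my-project-x" # replace with your project id
  machine_type        = "n1-highmem-32"
  network             = "default"
  subnetwork          = "default"
  linux_image         = "sles-15-sp2-sap"
  linux_image_project = "suse-sap-cloud"

  sap_primary_instance = "prd-nw1" # initial SCS host
  sap_primary_zone     = "us-central1-b"

  sap_secondary_instance = "prd-nw2" # initial ERS host
  sap_secondary_zone     = "us-central1-c"

  nfs_path = "1.2.3.4:/my_path" # NFS export for the shared file system
  sap_sid  = "PE1"
}
```

As with the other modules, `terraform init`, `terraform plan`, and `terraform apply` drive the deployment, and `terraform destroy` removes the created resources.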
diff --git a/modules/sap_nw_ha/main.tf b/modules/sap_nw_ha/main.tf index b1898633..0123ac05 100644 --- a/modules/sap_nw_ha/main.tf +++ b/modules/sap_nw_ha/main.tf @@ -17,7 +17,7 @@ # Terraform SAP NW HA for Google Cloud # # Version: 2.0.202404101403 -# Build Hash: 4d5e66e2ca20a6d498491377677dcc2f3579ebd7 +# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 # ################################################################################ diff --git a/modules/sap_nw_ha/versions.tf b/modules/sap_nw_ha/versions.tf index 5afb4ea1..fb459560 100644 --- a/modules/sap_nw_ha/versions.tf +++ b/modules/sap_nw_ha/versions.tf @@ -21,4 +21,4 @@ terraform { version = ">= 4.0.0, < 6" } } -} \ No newline at end of file +} From 9fddf387aa0f04d81b8d164e83c2e85fda3dfc1b Mon Sep 17 00:00:00 2001 From: Google SAP Deployments Dev Date: Fri, 12 Apr 2024 14:17:42 +0000 Subject: [PATCH 5/8] Project import generated by Copybara. GitOrigin-RevId: 978a58ebebb1e35f0a2cc85d3b7cc41a76cfe38d --- modules/sap_hana/main.tf | 2 +- modules/sap_hana_ha/main.tf | 2 +- modules/sap_hana_scaleout/main.tf | 2 +- modules/sap_nw/README.md | 28 ++++++++++++++++++++++++++-- modules/sap_nw/main.tf | 2 +- modules/sap_nw_ha/main.tf | 2 +- 6 files changed, 31 insertions(+), 7 deletions(-) diff --git a/modules/sap_hana/main.tf b/modules/sap_hana/main.tf index f636c6ea..96e1323c 100644 --- a/modules/sap_hana/main.tf +++ b/modules/sap_hana/main.tf @@ -17,7 +17,7 @@ # Terraform SAP HANA for Google Cloud # # Version: 2.0.202404101403 -# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc # ################################################################################ diff --git a/modules/sap_hana_ha/main.tf b/modules/sap_hana_ha/main.tf index 46199968..64fd8749 100644 --- a/modules/sap_hana_ha/main.tf +++ b/modules/sap_hana_ha/main.tf @@ -17,7 +17,7 @@ # Terraform SAP HANA HA for Google Cloud # # Version: 2.0.202404101403 -# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc # ################################################################################ diff --git a/modules/sap_hana_scaleout/main.tf b/modules/sap_hana_scaleout/main.tf index 23de33f3..11659512 100644 --- a/modules/sap_hana_scaleout/main.tf +++ b/modules/sap_hana_scaleout/main.tf @@ -18,7 +18,7 @@ # # # Version: 2.0.202404101403 -# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc # ################################################################################ diff --git a/modules/sap_nw/README.md b/modules/sap_nw/README.md index 4d45e608..a9c7ca4b 100644 --- a/modules/sap_nw/README.md +++ b/modules/sap_nw/README.md @@ -24,7 +24,6 @@ Install Terraform on the machine you would like to use to deploy from by followi For additional information see https://www.terraform.io/docs/index.html and https://cloud.google.com/docs/terraform - ## Inputs @@ -55,4 +54,29 @@ For additional information see https://www.terraform.io/docs/index.html and http |------|-------------| | sap\_nw\_self\_link | SAP NW self-link for instance created | - \ No newline at end of file + + +## Requirements + +These sections describe requirements for using this module. 
+ +### Software + +The following dependencies must be available: + +- [Terraform][terraform] v0.13 +- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 + +## Contributing + +Refer to the [contribution guidelines](./CONTRIBUTING.md) for +information on contributing to this module. + +[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google +[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google +[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html +[terraform]: https://www.terraform.io/downloads.html + +## Security Disclosures + +Please see our [security disclosure process](./SECURITY.md). diff --git a/modules/sap_nw/main.tf b/modules/sap_nw/main.tf index cca42da9..9e0eebda 100644 --- a/modules/sap_nw/main.tf +++ b/modules/sap_nw/main.tf @@ -17,7 +17,7 @@ # Terraform SAP NW for Google Cloud # # Version: 2.0.202404101403 -# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc # ################################################################################ diff --git a/modules/sap_nw_ha/main.tf b/modules/sap_nw_ha/main.tf index 0123ac05..ae7ace84 100644 --- a/modules/sap_nw_ha/main.tf +++ b/modules/sap_nw_ha/main.tf @@ -17,7 +17,7 @@ # Terraform SAP NW HA for Google Cloud # # Version: 2.0.202404101403 -# Build Hash: 91fd957c3c747f1e8b4711150ebe2f817198c4b1 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc # ################################################################################ From 37f627395511e2a4413b9c3881cf615f193711cf Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Tue, 16 Apr 2024 15:06:22 +0000 Subject: [PATCH 6/8] adding in simple examples. --- examples/sap_hana_simple/README.md | 13 ---------- examples/sap_nw_ha_simple/README.md | 13 ++++++++++ examples/sap_nw_ha_simple/main.tf | 36 ++++++++++++++++++++++++++ examples/sap_nw_ha_simple/outputs.tf | 23 ++++++++++++++++ examples/sap_nw_ha_simple/variables.tf | 20 ++++++++++++++ examples/sap_nw_simple/README.md | 28 ++++++++++++++++++++ examples/sap_nw_simple/main.tf | 28 ++++++++++++++++++++ examples/sap_nw_simple/outputs.tf | 19 ++++++++++++++ examples/sap_nw_simple/variables.tf | 20 ++++++++++++++ 9 files changed, 187 insertions(+), 13 deletions(-) create mode 100644 examples/sap_nw_ha_simple/README.md create mode 100644 examples/sap_nw_ha_simple/main.tf create mode 100644 examples/sap_nw_ha_simple/outputs.tf create mode 100644 examples/sap_nw_ha_simple/variables.tf create mode 100644 examples/sap_nw_simple/README.md create mode 100644 examples/sap_nw_simple/main.tf create mode 100644 examples/sap_nw_simple/outputs.tf create mode 100644 examples/sap_nw_simple/variables.tf diff --git a/examples/sap_hana_simple/README.md b/examples/sap_hana_simple/README.md index 9bd01c5d..d12ecce3 100644 --- a/examples/sap_hana_simple/README.md +++ b/examples/sap_hana_simple/README.md @@ -3,19 +3,6 @@ This example illustrates how to use the latest release of the terraform module for SAP on Google Cloud for provisioning SAP HANA -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| project\_id | Project id where the instances will be created. 
| `string` | n/a | yes | - -## Outputs - -| Name | Description | -|------|-------------| -| sap\_hana\_primary\_self\_link | SAP HANA self-link for the primary instance created | -| sap\_hana\_worker\_self\_links | SAP HANA self-links for the secondary instances created | - To provision this example, run the following from within this directory: diff --git a/examples/sap_nw_ha_simple/README.md b/examples/sap_nw_ha_simple/README.md new file mode 100644 index 00000000..a2bcdfed --- /dev/null +++ b/examples/sap_nw_ha_simple/README.md @@ -0,0 +1,13 @@ +# SAP HANA HA example + +This example illustrates how to use the latest release of the terraform module for SAP on Google Cloud +for provisioning SAP HANA with HA + + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/sap_nw_ha_simple/main.tf b/examples/sap_nw_ha_simple/main.tf new file mode 100644 index 00000000..32718c24 --- /dev/null +++ b/examples/sap_nw_ha_simple/main.tf @@ -0,0 +1,36 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "sap_nw_ha" { + source = "terraform-google-modules/sap/google//modules/sap_nw" + version = "~> 1.0" + + project_id = var.project_id + machine_type = "n1-highmem-32" + network = "default" + subnetwork = "default" + linux_image = "sles-15-sp2-sap" + linux_image_project = "suse-sap-cloud" + + sap_primary_instance = "prd-nw1" + sap_primary_zone = "us-central1-b" + + sap_secondary_instance = "prd-nw2" + sap_secondary_zone = "us-central1-c" + + nfs_path = "1.2.3.4:/my_path" + sap_sid = "PE1" +} diff --git a/examples/sap_nw_ha_simple/outputs.tf b/examples/sap_nw_ha_simple/outputs.tf new file mode 100644 index 00000000..79e040c7 --- /dev/null +++ b/examples/sap_nw_ha_simple/outputs.tf @@ -0,0 +1,23 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +output "sap_nw_ha_scs_instance_self_link" { + description = "Self-link for the primary SAP NW instance created." + value = module.sap_nw_ha.scs_instance +} +output "sap_nw_ha_ers_instance_self_link" { + description = "Self-link for the primary SAP NW instance created." 
+ value = module.sap_nw_ha.ers_instance +} diff --git a/examples/sap_nw_ha_simple/variables.tf b/examples/sap_nw_ha_simple/variables.tf new file mode 100644 index 00000000..53c86a8e --- /dev/null +++ b/examples/sap_nw_ha_simple/variables.tf @@ -0,0 +1,20 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + type = string + description = "Project id where the instances will be created." +} diff --git a/examples/sap_nw_simple/README.md b/examples/sap_nw_simple/README.md new file mode 100644 index 00000000..fc86fea8 --- /dev/null +++ b/examples/sap_nw_simple/README.md @@ -0,0 +1,28 @@ +# SAP HANA HA example + +This example illustrates how to use the latest release of the terraform module for SAP on Google Cloud +for provisioning SAP HANA with HA + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| project\_id | Project id where the instances will be created. | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| sap\_hana\_ha\_firewall\_link | Link to the optional fire wall | +| sap\_hana\_ha\_loadbalander\_link | Link to the optional load balancer | +| sap\_hana\_ha\_primary\_instance\_self\_link | Self-link for the primary SAP HANA HA instance created. | +| sap\_hana\_ha\_secondary\_instance\_self\_link | Self-link for the secondary SAP HANA HA instance created. | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/sap_nw_simple/main.tf b/examples/sap_nw_simple/main.tf new file mode 100644 index 00000000..ba56545d --- /dev/null +++ b/examples/sap_nw_simple/main.tf @@ -0,0 +1,28 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +module "sap_nw" { + source = "terraform-google-modules/sap/google//modules/sap_nw" + version = "~> 1.0" + + project_id = var.project_id + zone = "us-east1-b" + machine_type = "n1-highmem-32" + subnetwork = "default" + linux_image = "rhel-8-4-sap-ha" + linux_image_project = "rhel-sap-cloud" + instance_name = "nw-instance" +} diff --git a/examples/sap_nw_simple/outputs.tf b/examples/sap_nw_simple/outputs.tf new file mode 100644 index 00000000..f4d5bac3 --- /dev/null +++ b/examples/sap_nw_simple/outputs.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +output "sap_nw_primary_instance_self_link" { + description = "Self-link for the primary SAP NW instance created." + value = module.sap_nw.sap_nw_self_link +} diff --git a/examples/sap_nw_simple/variables.tf b/examples/sap_nw_simple/variables.tf new file mode 100644 index 00000000..53c86a8e --- /dev/null +++ b/examples/sap_nw_simple/variables.tf @@ -0,0 +1,20 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + type = string + description = "Project id where the instances will be created." +} From 46da8262ed61c2cdb007e29af7de0bfbd9407322 Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Tue, 16 Apr 2024 15:06:22 +0000 Subject: [PATCH 7/8] adding in simple examples. --- examples/sap_nw_ha_simple/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/sap_nw_ha_simple/main.tf b/examples/sap_nw_ha_simple/main.tf index 32718c24..414d396d 100644 --- a/examples/sap_nw_ha_simple/main.tf +++ b/examples/sap_nw_ha_simple/main.tf @@ -15,7 +15,7 @@ */ module "sap_nw_ha" { - source = "terraform-google-modules/sap/google//modules/sap_nw" + source = "terraform-google-modules/sap/google//modules/sap_nw_ha" version = "~> 1.0" project_id = var.project_id From fe1fa1e8c3ee16cdbd37a327852334ae85a765df Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Wed, 17 Apr 2024 15:28:02 +0000 Subject: [PATCH 8/8] Re-running the makefile. 
--- examples/sap_hana_simple/README.md | 13 +++++++++++++ examples/sap_nw_ha_simple/README.md | 13 +++++++++++++ examples/sap_nw_simple/README.md | 5 +---- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/examples/sap_hana_simple/README.md b/examples/sap_hana_simple/README.md index d12ecce3..9bd01c5d 100644 --- a/examples/sap_hana_simple/README.md +++ b/examples/sap_hana_simple/README.md @@ -3,6 +3,19 @@ This example illustrates how to use the latest release of the terraform module for SAP on Google Cloud for provisioning SAP HANA +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| project\_id | Project id where the instances will be created. | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| sap\_hana\_primary\_self\_link | SAP HANA self-link for the primary instance created | +| sap\_hana\_worker\_self\_links | SAP HANA self-links for the secondary instances created | + To provision this example, run the following from within this directory: diff --git a/examples/sap_nw_ha_simple/README.md b/examples/sap_nw_ha_simple/README.md index a2bcdfed..85b3f895 100644 --- a/examples/sap_nw_ha_simple/README.md +++ b/examples/sap_nw_ha_simple/README.md @@ -4,6 +4,19 @@ This example illustrates how to use the latest release of the terraform module f for provisioning SAP HANA with HA +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| project\_id | Project id where the instances will be created. | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| sap\_nw\_ha\_ers\_instance\_self\_link | Self-link for the primary SAP NW instance created. | +| sap\_nw\_ha\_scs\_instance\_self\_link | Self-link for the primary SAP NW instance created. | + To provision this example, run the following from within this directory: diff --git a/examples/sap_nw_simple/README.md b/examples/sap_nw_simple/README.md index fc86fea8..dc28bfc6 100644 --- a/examples/sap_nw_simple/README.md +++ b/examples/sap_nw_simple/README.md @@ -14,10 +14,7 @@ for provisioning SAP HANA with HA | Name | Description | |------|-------------| -| sap\_hana\_ha\_firewall\_link | Link to the optional fire wall | -| sap\_hana\_ha\_loadbalander\_link | Link to the optional load balancer | -| sap\_hana\_ha\_primary\_instance\_self\_link | Self-link for the primary SAP HANA HA instance created. | -| sap\_hana\_ha\_secondary\_instance\_self\_link | Self-link for the secondary SAP HANA HA instance created. | +| sap\_nw\_primary\_instance\_self\_link | Self-link for the primary SAP NW instance created. |
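
For reference, both simple examples added in this series declare a single required input,
project_id, and their READMEs walk through the same init/plan/apply flow. A minimal
terraform.tfvars for examples/sap_nw_simple or examples/sap_nw_ha_simple might look like the
sketch below; the project ID shown is a placeholder, not a value taken from these patches.

# terraform.tfvars (sketch) -- the only required variable in either simple example
# is project_id; "my-gcp-project" is a placeholder for your own GCP project ID.
project_id = "my-gcp-project"

With that file in place, running `terraform init`, `terraform plan`, and `terraform apply`
from the example directory provisions the instances, and `terraform destroy` removes them,
as the example READMEs describe.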