Skip to content

Commit

Permalink
No public description
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 681999175
  • Loading branch information
sjswerdlow authored and copybara-github committed Oct 3, 2024
1 parent 43cd589 commit 01d6870
Show file tree
Hide file tree
Showing 6 changed files with 171 additions and 13 deletions.
52 changes: 47 additions & 5 deletions modules/sap_hana/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,8 @@ locals {
num_data_disks = var.enable_data_striping ? var.number_data_disks : 1
num_log_disks = var.enable_log_striping ? var.number_log_disks : 1

sole_tenant_name_prefix = var.sole_tenant_name_prefix != "" ? var.sole_tenant_name_prefix : "st-${lower(var.sap_hana_sid)}"
# Minimum disk sizes are used to ensure throughput. Extreme disks don't need this.
# All 'over provisioned' capacity is to go onto the data disk.
final_disk_type = var.disk_type == "" ? (local.default_hyperdisk_extreme ? "hyperdisk-extreme" : (local.default_hyperdisk_balanced ? "hyperdisk-balanced" : "pd-ssd")) : var.disk_type
Expand Down Expand Up @@ -408,6 +410,31 @@ resource "google_compute_address" "sap_hana_worker_ip" {
address = length(var.worker_static_ips) > count.index ? var.worker_static_ips[count.index] : ""
}

################################################################################
# Sole tenant items
################################################################################
# Node template describing the sole tenant node shape to provision.
# Created only when the deployment opts into sole tenancy.
resource "google_compute_node_template" "sole_tenant_node_template" {
count = var.sole_tenant_deployment ? 1 : 0
name = "${local.sole_tenant_name_prefix}-node-template"
# NOTE(review): var.sole_tenant_node_type defaults to null; the apply will
# fail if sole_tenant_deployment = true without a node type — confirm this
# is the intended contract or add a validation/precondition.
node_type = var.sole_tenant_node_type
# region is derived in locals (presumably from the zone) — TODO confirm.
region = local.region
project = var.project_id
}

# Autoscaling node group backing the sole tenant deployment. Sized to hold
# the primary instance plus all scale-out workers (one node per VM, worst
# case), scaling down to a single node.
resource "google_compute_node_group" "sole_tenant_node_group" {
count = var.sole_tenant_deployment ? 1 : 0
name = "${local.sole_tenant_name_prefix}-node-group"
# NOTE(review): the provider documents node_template as a template URL;
# passing .name appears to rely on provider-side expansion — consider
# .self_link or .id to avoid permanent diffs. Verify against the
# google_compute_node_group resource docs.
node_template = google_compute_node_template.sole_tenant_node_template[0].name
zone = var.zone
project = var.project_id
initial_size = 1
autoscaling_policy {
mode = "ON"
min_nodes = 1
# +1 accounts for the primary instance on top of the scale-out workers.
max_nodes = var.sap_hana_scaleout_nodes + 1
}
}

################################################################################
# instances
################################################################################
Expand All @@ -425,9 +452,17 @@ resource "google_compute_instance" "sap_hana_primary_instance" {
}

dynamic "scheduling" {
  # Emitted for bare metal shapes (which require TERMINATE maintenance) and
  # for sole tenant deployments (which need a node affinity).
  for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
  content {
    on_host_maintenance = local.native_bm ? "TERMINATE" : null
    dynamic "node_affinities" {
      # A counted resource evaluates to a (possibly empty) tuple and is never
      # null, so the previous `!= null` guard was always true and emitted an
      # affinity for a non-existent node group on bare-metal-only deployments.
      # Gate on the same condition that creates the node group instead.
      for_each = var.sole_tenant_deployment ? [1] : []
      content {
        key      = "compute.googleapis.com/node-group-name"
        operator = "IN"
        # Reference the resource (not a re-derived name string) so Terraform
        # creates the node group before the instance.
        values = [google_compute_node_group.sole_tenant_node_group[0].name]
      }
    }
  }
}

Expand Down Expand Up @@ -503,7 +538,6 @@ resource "google_compute_instance" "sap_hana_primary_instance" {
]
}


dynamic "reservation_affinity" {
for_each = length(var.reservation_name) > 1 ? [1] : []
content {
Expand Down Expand Up @@ -564,9 +598,17 @@ resource "google_compute_instance" "sap_hana_worker_instances" {
}

dynamic "scheduling" {
  # Emitted for bare metal shapes (which require TERMINATE maintenance) and
  # for sole tenant deployments (which need a node affinity).
  for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
  content {
    on_host_maintenance = local.native_bm ? "TERMINATE" : null
    dynamic "node_affinities" {
      # A counted resource evaluates to a (possibly empty) tuple and is never
      # null, so the previous `!= null` guard was always true and emitted an
      # affinity for a non-existent node group on bare-metal-only deployments.
      # Gate on the same condition that creates the node group instead.
      for_each = var.sole_tenant_deployment ? [1] : []
      content {
        key      = "compute.googleapis.com/node-group-name"
        operator = "IN"
        # Reference the resource (not a re-derived name string) so Terraform
        # creates the node group before the instances.
        values = [google_compute_node_group.sole_tenant_node_group[0].name]
      }
    }
  }
}

Expand Down
4 changes: 4 additions & 0 deletions modules/sap_hana/sap_hana.tf
Original file line number Diff line number Diff line change
Expand Up @@ -77,4 +77,8 @@ module "sap_hana" {
# backup_disk_type = "DISK_TYPE" # default is pd-ssd, except for machines that do not support PD, in which case the default is hyperdisk-extreme. Valid types are "pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme".
# enable_fast_restart = true_or_false # default is true, whether to enable HANA Fast Restart
# enable_data_striping = true_or_false # default is false. Enable LVM striping of data volume across multiple disks. Data striping is only intended for cases where the machine level limits are higher than the hyperdisk disk level limits. Refer to https://cloud.google.com/compute/docs/disks/hyperdisks#hd-performance-limits

# sole_tenant_deployment = true_or_false # default is false. Whether to deploy on Sole Tenant Nodes.
# sole_tenant_node_type = "NODE_TYPE" # Sole Tenant Node Type to use. See https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types
# sole_tenant_name_prefix = "PREFIX" # name of the prefix to use for the Sole Tenant objects (Node Templates, Node Groups). If left blank with sole_tenant_deployment=true, st-<sap_hana_sid> will be used.
}
18 changes: 18 additions & 0 deletions modules/sap_hana/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -344,6 +344,24 @@ variable "enable_data_striping" {
default = false
}

# Master switch for sole tenancy: creates the node template / node group and
# pins instances to the group via node affinity.
variable "sole_tenant_deployment" {
type = bool
description = "Optional - default is false. Deploy on Sole Tenant Nodes."
default = false
}

# NOTE(review): required in practice when sole_tenant_deployment = true (the
# node template cannot be created without it) — consider documenting or
# validating that pairing.
variable "sole_tenant_node_type" {
type = string
description = "Optional - default is null. Sole Tenant Node Type to use. See https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types"
default = null
}

# Prefix for the names of all sole tenant objects; falls back to
# "st-<sap_hana_sid>" (lowercased) when left empty.
variable "sole_tenant_name_prefix" {
type = string
description = "Optional - name of the prefix to use for the Sole Tenant objects (Node Templates, Node Groups). If left blank with sole_tenant_deployment=true, st-<sap_hana_sid> will be used."
default = ""
}

#
# DO NOT MODIFY unless instructed or aware of the implications of using those settings
#
Expand Down
88 changes: 80 additions & 8 deletions modules/sap_hana_ha/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,8 @@ locals {
num_data_disks = var.enable_data_striping ? var.number_data_disks : 1
num_log_disks = var.enable_log_striping ? var.number_log_disks : 1

sole_tenant_name_prefix = var.sole_tenant_name_prefix != "" ? var.sole_tenant_name_prefix : "st-${lower(var.sap_hana_sid)}"
# Minimum disk sizes are used to ensure throughput. Extreme disks don't need this.
# All 'over provisioned' capacity is to go onto the data disk.
final_disk_type = var.disk_type == "" ? (local.default_hyperdisk_extreme ? "hyperdisk-extreme" : (local.default_hyperdisk_balanced ? "hyperdisk-balanced" : "pd-ssd")) : var.disk_type
Expand Down Expand Up @@ -357,6 +359,44 @@ resource "google_compute_address" "sap_hana_ha_worker_vm_ip" {
length(var.secondary_worker_static_ips) > floor(count.index / 2) ? var.secondary_worker_static_ips[floor(count.index / 2)] : "")
}

################################################################################
# Sole tenant items
################################################################################
# Node template describing the sole tenant node shape; shared by the primary
# and secondary node groups. Created only for sole tenant deployments.
resource "google_compute_node_template" "sole_tenant_node_template" {
count = var.sole_tenant_deployment ? 1 : 0
name = "${local.sole_tenant_name_prefix}-node-template"
# NOTE(review): null unless the caller sets sole_tenant_node_type — the
# apply fails without it. Confirm intended contract.
node_type = var.sole_tenant_node_type
# region is derived in locals (presumably from the zones) — TODO confirm.
region = local.region
project = var.project_id
}

# Node group in the primary zone; autoscales between one node and enough
# nodes for the primary instance plus all scale-out workers.
resource "google_compute_node_group" "sole_tenant_primary_node_group" {
count = var.sole_tenant_deployment ? 1 : 0
name = "${local.sole_tenant_name_prefix}-primary-node-group"
# NOTE(review): the provider documents node_template as a template URL;
# consider .self_link or .id instead of .name. Verify against the
# google_compute_node_group resource docs.
node_template = google_compute_node_template.sole_tenant_node_template[0].name
zone = var.primary_zone
project = var.project_id
initial_size = 1
autoscaling_policy {
mode = "ON"
min_nodes = 1
# +1 accounts for the primary instance on top of the scale-out workers.
max_nodes = var.sap_hana_scaleout_nodes + 1
}
}

# Node group in the secondary zone, mirroring the primary group's sizing so
# the standby side can host the secondary instance and its workers.
resource "google_compute_node_group" "sole_tenant_secondary_node_group" {
count = var.sole_tenant_deployment ? 1 : 0
name = "${local.sole_tenant_name_prefix}-secondary-node-group"
# NOTE(review): the provider documents node_template as a template URL;
# consider .self_link or .id instead of .name. Verify against the
# google_compute_node_group resource docs.
node_template = google_compute_node_template.sole_tenant_node_template[0].name
zone = var.secondary_zone
project = var.project_id
initial_size = 1
autoscaling_policy {
mode = "ON"
min_nodes = 1
# +1 accounts for the secondary instance on top of the scale-out workers.
max_nodes = var.sap_hana_scaleout_nodes + 1
}
}
################################################################################
# Primary Instance
################################################################################
Expand Down Expand Up @@ -472,9 +512,17 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" {
}

dynamic "scheduling" {
  # Emitted for bare metal shapes (which require TERMINATE maintenance) and
  # for sole tenant deployments (which need a node affinity).
  for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
  content {
    on_host_maintenance = local.native_bm ? "TERMINATE" : null
    dynamic "node_affinities" {
      # A counted resource evaluates to a (possibly empty) tuple and is never
      # null, so the previous `!= null` guard was always true and emitted an
      # affinity for a non-existent node group on bare-metal-only deployments.
      # Gate on the same condition that creates the node group instead.
      for_each = var.sole_tenant_deployment ? [1] : []
      content {
        key      = "compute.googleapis.com/node-group-name"
        operator = "IN"
        # Reference the resource (not a re-derived name string) so Terraform
        # creates the node group before the instance.
        values = [google_compute_node_group.sole_tenant_primary_node_group[0].name]
      }
    }
  }
}

Expand Down Expand Up @@ -624,9 +672,17 @@ resource "google_compute_instance" "sap_hana_ha_primary_workers" {
}

dynamic "scheduling" {
  # Emitted for bare metal shapes (which require TERMINATE maintenance) and
  # for sole tenant deployments (which need a node affinity).
  for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
  content {
    on_host_maintenance = local.native_bm ? "TERMINATE" : null
    dynamic "node_affinities" {
      # A counted resource evaluates to a (possibly empty) tuple and is never
      # null, so the previous `!= null` guard was always true and emitted an
      # affinity for a non-existent node group on bare-metal-only deployments.
      # Gate on the same condition that creates the node group instead.
      for_each = var.sole_tenant_deployment ? [1] : []
      content {
        key      = "compute.googleapis.com/node-group-name"
        operator = "IN"
        # Reference the resource (not a re-derived name string) so Terraform
        # creates the node group before the workers.
        values = [google_compute_node_group.sole_tenant_primary_node_group[0].name]
      }
    }
  }
}

Expand Down Expand Up @@ -857,9 +913,17 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" {
}

dynamic "scheduling" {
  # Emitted for bare metal shapes (which require TERMINATE maintenance) and
  # for sole tenant deployments (which need a node affinity).
  for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
  content {
    on_host_maintenance = local.native_bm ? "TERMINATE" : null
    dynamic "node_affinities" {
      # A counted resource evaluates to a (possibly empty) tuple and is never
      # null, so the previous `!= null` guard was always true and emitted an
      # affinity for a non-existent node group on bare-metal-only deployments.
      # Gate on the same condition that creates the node group instead.
      for_each = var.sole_tenant_deployment ? [1] : []
      content {
        key      = "compute.googleapis.com/node-group-name"
        operator = "IN"
        # Reference the resource (not a re-derived name string) so Terraform
        # creates the node group before the instance.
        values = [google_compute_node_group.sole_tenant_secondary_node_group[0].name]
      }
    }
  }
}

Expand Down Expand Up @@ -1008,9 +1072,17 @@ resource "google_compute_instance" "sap_hana_ha_secondary_workers" {
}

dynamic "scheduling" {
  # Emitted for bare metal shapes (which require TERMINATE maintenance) and
  # for sole tenant deployments (which need a node affinity).
  for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
  content {
    on_host_maintenance = local.native_bm ? "TERMINATE" : null
    dynamic "node_affinities" {
      # A counted resource evaluates to a (possibly empty) tuple and is never
      # null, so the previous `!= null` guard was always true and emitted an
      # affinity for a non-existent node group on bare-metal-only deployments.
      # Gate on the same condition that creates the node group instead.
      for_each = var.sole_tenant_deployment ? [1] : []
      content {
        key      = "compute.googleapis.com/node-group-name"
        operator = "IN"
        # Reference the resource (not a re-derived name string) so Terraform
        # creates the node group before the workers.
        values = [google_compute_node_group.sole_tenant_secondary_node_group[0].name]
      }
    }
  }
}

Expand Down
4 changes: 4 additions & 0 deletions modules/sap_hana_ha/sap_hana_ha.tf
Original file line number Diff line number Diff line change
Expand Up @@ -92,4 +92,8 @@ module "sap_hana_ha" {
# backup_disk_type = "DISK_TYPE" # default is pd-ssd, except for machines that do not support PD, in which case the default is hyperdisk-extreme. Valid types are "pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme".
# enable_fast_restart = true_or_false # default is true, whether to enable HANA Fast Restart
# enable_data_striping = true_or_false # default is false. Enable LVM striping of data volume across multiple disks. Data striping is only intended for cases where the machine level limits are higher than the hyperdisk disk level limits. Refer to https://cloud.google.com/compute/docs/disks/hyperdisks#hd-performance-limits

# sole_tenant_deployment = true_or_false # default is false. Whether to deploy on Sole Tenant Nodes.
# sole_tenant_node_type = "NODE_TYPE" # Sole Tenant Node Type to use. See https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types
# sole_tenant_name_prefix = "PREFIX" # name of the prefix to use for the Sole Tenant objects (Node Templates, Node Groups). If left blank with sole_tenant_deployment=true, st-<sap_hana_sid> will be used.
}
18 changes: 18 additions & 0 deletions modules/sap_hana_ha/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -390,6 +390,24 @@ variable "enable_data_striping" {
default = false
}

# Master switch for sole tenancy: creates the node template / node groups and
# pins instances to their zone's group via node affinity.
variable "sole_tenant_deployment" {
type = bool
description = "Optional - default is false. Deploy on Sole Tenant Nodes."
default = false
}

# NOTE(review): required in practice when sole_tenant_deployment = true (the
# node template cannot be created without it) — consider documenting or
# validating that pairing.
variable "sole_tenant_node_type" {
type = string
description = "Optional - default is null. Sole Tenant Node Type to use. See https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types"
default = null
}

# Prefix for the names of all sole tenant objects; falls back to
# "st-<sap_hana_sid>" (lowercased) when left empty.
variable "sole_tenant_name_prefix" {
type = string
description = "Optional - name of the prefix to use for the Sole Tenant objects (Node Templates, Node Groups). If left blank with sole_tenant_deployment=true, st-<sap_hana_sid> will be used."
default = ""
}

#
# DO NOT MODIFY unless instructed or aware of the implications of using those settings
#
Expand Down

0 comments on commit 01d6870

Please sign in to comment.