diff --git a/examples/sap_nw_ha_simple/README.md b/examples/sap_nw_ha_simple/README.md
new file mode 100644
index 00000000..85b3f895
--- /dev/null
+++ b/examples/sap_nw_ha_simple/README.md
@@ -0,0 +1,26 @@
+# SAP NW HA example
+
+This example illustrates how to use the latest release of the Terraform module for SAP on Google Cloud
+for provisioning SAP NW with HA.
+
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| project\_id | Project id where the instances will be created. | `string` | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| sap\_nw\_ha\_ers\_instance\_self\_link | Self-link for the ERS SAP NW instance created. |
+| sap\_nw\_ha\_scs\_instance\_self\_link | Self-link for the SCS SAP NW instance created. |
+
+
+
+To provision this example, run the following from within this directory:
+- `terraform init` to get the plugins
+- `terraform plan` to see the infrastructure plan
+- `terraform apply` to apply the infrastructure build
+- `terraform destroy` to destroy the built infrastructure
diff --git a/examples/sap_nw_ha_simple/main.tf b/examples/sap_nw_ha_simple/main.tf
new file mode 100644
index 00000000..414d396d
--- /dev/null
+++ b/examples/sap_nw_ha_simple/main.tf
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module "sap_nw_ha" {
+  source  = "terraform-google-modules/sap/google//modules/sap_nw_ha"
+  version = "~> 1.0"
+
+  project_id          = var.project_id
+  machine_type        = "n1-highmem-32"
+  network             = "default"
+  subnetwork          = "default"
+  linux_image         = "sles-15-sp2-sap"
+  linux_image_project = "suse-sap-cloud"
+
+  sap_primary_instance = "prd-nw1"
+  sap_primary_zone     = "us-central1-b"
+
+  sap_secondary_instance = "prd-nw2"
+  sap_secondary_zone     = "us-central1-c"
+
+  nfs_path = "1.2.3.4:/my_path"
+  sap_sid  = "PE1"
+}
diff --git a/examples/sap_nw_ha_simple/outputs.tf b/examples/sap_nw_ha_simple/outputs.tf
new file mode 100644
index 00000000..79e040c7
--- /dev/null
+++ b/examples/sap_nw_ha_simple/outputs.tf
@@ -0,0 +1,23 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+output "sap_nw_ha_scs_instance_self_link" {
+  description = "Self-link for the SCS SAP NW instance created."
+  value       = module.sap_nw_ha.scs_instance
+}
+output "sap_nw_ha_ers_instance_self_link" {
+  description = "Self-link for the ERS SAP NW instance created."
+  value       = module.sap_nw_ha.ers_instance
+}
diff --git a/examples/sap_nw_ha_simple/variables.tf b/examples/sap_nw_ha_simple/variables.tf
new file mode 100644
index 00000000..53c86a8e
--- /dev/null
+++ b/examples/sap_nw_ha_simple/variables.tf
@@ -0,0 +1,20 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+variable "project_id" {
+  type        = string
+  description = "Project id where the instances will be created."
+}
diff --git a/examples/sap_nw_simple/README.md b/examples/sap_nw_simple/README.md
new file mode 100644
index 00000000..dc28bfc6
--- /dev/null
+++ b/examples/sap_nw_simple/README.md
@@ -0,0 +1,25 @@
+# SAP NW example
+
+This example illustrates how to use the latest release of the Terraform module for SAP on Google Cloud
+for provisioning SAP NW.
+
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| project\_id | Project id where the instances will be created. | `string` | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| sap\_nw\_primary\_instance\_self\_link | Self-link for the primary SAP NW instance created. |
+
+
+
+To provision this example, run the following from within this directory:
+- `terraform init` to get the plugins
+- `terraform plan` to see the infrastructure plan
+- `terraform apply` to apply the infrastructure build
+- `terraform destroy` to destroy the built infrastructure
diff --git a/examples/sap_nw_simple/main.tf b/examples/sap_nw_simple/main.tf
new file mode 100644
index 00000000..ba56545d
--- /dev/null
+++ b/examples/sap_nw_simple/main.tf
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module "sap_nw" {
+  source  = "terraform-google-modules/sap/google//modules/sap_nw"
+  version = "~> 1.0"
+
+  project_id          = var.project_id
+  zone                = "us-east1-b"
+  machine_type        = "n1-highmem-32"
+  subnetwork          = "default"
+  linux_image         = "rhel-8-4-sap-ha"
+  linux_image_project = "rhel-sap-cloud"
+  instance_name       = "nw-instance"
+}
diff --git a/examples/sap_nw_simple/outputs.tf b/examples/sap_nw_simple/outputs.tf
new file mode 100644
index 00000000..f4d5bac3
--- /dev/null
+++ b/examples/sap_nw_simple/outputs.tf
@@ -0,0 +1,19 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+output "sap_nw_primary_instance_self_link" {
+  description = "Self-link for the primary SAP NW instance created."
+  value       = module.sap_nw.sap_nw_self_link
+}
diff --git a/examples/sap_nw_simple/variables.tf b/examples/sap_nw_simple/variables.tf
new file mode 100644
index 00000000..53c86a8e
--- /dev/null
+++ b/examples/sap_nw_simple/variables.tf
@@ -0,0 +1,20 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+variable "project_id" {
+  type        = string
+  description = "Project id where the instances will be created."
+}
diff --git a/modules/sap_hana/README.md b/modules/sap_hana/README.md
index 00bab516..3423d12b 100644
--- a/modules/sap_hana/README.md
+++ b/modules/sap_hana/README.md
@@ -4,60 +4,107 @@
 This template follows the documented steps
 https://cloud.google.com/solutions/sap/docs/certifications-sap-hana
 and deploys GCP and Pacemaker resources up to the installation of SAP's central services.
 
-## Usage
+## Set up Terraform
 
-Basic usage of this module is as follows:
+Install Terraform on the machine you would like to deploy from by
+following
+https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform
 
-```hcl
-module "sap_hana" {
-  source  = "terraform-google-modules/sap/google//modules/sap_hana"
-  version = "~> 1.1"
+## How to deploy
 
-  project_id          = "PROJECT_ID"          # example: my-project-x
-  zone                = "ZONE"                # example: us-east1-b
-  machine_type        = "MACHINE_TYPE"        # example: n1-highmem-32
-  subnetwork          = "SUBNETWORK"          # example: default
-  linux_image         = "LINUX_IMAGE"         # example: rhel-8-4-sap-ha
-  linux_image_project = "LINUX_IMAGE_PROJECT" # example: rhel-sap-cloud
-  instance_name       = "VM_NAME"             # example: hana_instance
-  sap_hana_sid        = "SID"                 # example: ABC, Must conform to [a-zA-Z][a-zA-Z0-9]{2}
-}
-```
+1. Download the .tf file into an empty directory: `curl
+   https://storage.googleapis.com/cloudsapdeploy/terraform/latest/terraform/sap_hana/terraform/sap_hana.tf
+   -o sap_hana.tf`
 
-Functional example is included in the
-[examples](../../examples/sap_hana_simple) directory.
+2. Fill in the mandatory variables and, if desired, any optional variables in the .tf
+   file.
+
+3. Deploy
+
+   1. Run `terraform init` (only needed once)
+   2. Run `terraform plan` to see what is going to be deployed. Verify that
+      names, zones, sizes, etc. are as desired.
+   3. Run `terraform apply` to deploy the resources
+   4. Run `terraform destroy` to remove the resources
+
+4. Continue installation of SAP software and setup of the remaining cluster
+   resources as per the documentation at
+   https://cloud.google.com/solutions/sap/docs/sap-hana-deployment-guide-tf
+
+## Additional information
+
+For additional information see https://www.terraform.io/docs/index.html and
+https://cloud.google.com/docs/terraform
 
 ## Inputs
 
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
+| backup\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| backup\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| backup\_disk\_type | Optional - The default is hyperdisk-balanced for native bare metal machines and pd-balanced otherwise; only used if a backup disk is needed. | `string` | `""` | no |
 | can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no |
+| data\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| data\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine\_type. | `number` | `null` | no |
+| data\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| data\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the data disk. | `string` | `""` | no |
+| disk\_type | Optional - The default disk type to use for disk(s) containing log and data volumes. The default is hyperdisk-extreme for native bare metal machines and pd-ssd otherwise. Not all disk types are supported on all machine types - see https://cloud.google.com/compute/docs/disks/ for details. | `string` | `""` | no |
+| enable\_fast\_restart | Optional - The default is true. If set, enables HANA Fast Restart. | `bool` | `true` | no |
+| hyperdisk\_balanced\_iops\_default | Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks). | `number` | `3000` | no |
+| hyperdisk\_balanced\_throughput\_default | Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks). | `number` | `750` | no |
+| include\_backup\_disk | Optional - The default is true. If set, creates a disk for backups. | `bool` | `true` | no |
 | instance\_name | Hostname of the GCE instance. | `string` | n/a | yes |
 | linux\_image | Linux image name to use. | `string` | n/a | yes |
 | linux\_image\_project | The project which the Linux image belongs to.
| `string` | n/a | yes | +| log\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| log\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the log disk. | `string` | `""` | no | | machine\_type | Machine type for the instances. | `string` | n/a | yes | | network\_tags | OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes. | `list(string)` | `[]` | no | +| nic\_type | Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO\_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking. | `string` | `""` | no | | post\_deployment\_script | OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment. | `string` | `""` | no | -| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s BUILD.TERRA_SH_URL/sap_hana/startup.sh | bash -s BUILD.TERRA_SH_URL"` | no | +| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | | project\_id | Project id where the instances will be created. | `string` | n/a | yes | | public\_ip | OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail. | `bool` | `true` | no | | reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | | sap\_deployment\_debug | OPTIONAL - If this value is set to true, the deployment will generates verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging. | `bool` | `false` | no | -| sap\_hana\_backup\_size | Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory. | `number` | `0` | no | -| sap\_hana\_deployment\_bucket | The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | +| sap\_hana\_backup\_nfs | NFS endpoint for /hanabackup storage. | `string` | `""` | no | +| sap\_hana\_backup\_nfs\_resource | NFS resource to be used as the backup drive instead of a disk. This and sap\_hana\_backup\_nfs may not both be set. |
object({
networks = list(
object({
ip_addresses = list(string)
})
)
file_shares = list(
object({
name = string
})
)

})
| `null` | no | +| sap\_hana\_backup\_size | Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory. If sap\_hana\_backup\_nfs is set, this setting is ignored. | `number` | `0` | no | +| sap\_hana\_deployment\_bucket | The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | | sap\_hana\_instance\_number | The SAP instance number. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `number` | `0` | no | | sap\_hana\_sapsys\_gid | The Linux GID of the SAPSYS group. By default this is set to 79 | `number` | `79` | no | | sap\_hana\_scaleout\_nodes | Number of additional nodes to add. E.g - if you wish for a 4 node cluster you would specify 3 here. | `number` | `0` | no | +| sap\_hana\_shared\_nfs | NFS endpoint for /hana/shared storage. | `string` | `""` | no | +| sap\_hana\_shared\_nfs\_resource | NFS resource to be used as the shared drive instead of a disk. This and sap\_hana\_shared\_nfs may not both be set. |
object({
networks = list(
object({
ip_addresses = list(string)
})
)
file_shares = list(
object({
name = string
})
)

})
| `null` | no | | sap\_hana\_sid | The SAP HANA SID. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. SID must adhere to SAP standard (Three letters or numbers and start with a letter) | `string` | n/a | yes | | sap\_hana\_sidadm\_password | The linux sidadm login password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters. | `string` | `""` | no | | sap\_hana\_sidadm\_password\_secret | The secret key used to retrieve the linux sidadm login from Secret Manager (https://cloud.google.com/secret-manager). The Secret Manager password will overwrite the clear text password from sap\_hana\_sidadm\_password if both are set. | `string` | `""` | no | | sap\_hana\_sidadm\_uid | The Linux UID of the adm user. By default this is set to 900 to avoid conflicting with other OS users. | `number` | `900` | no | | sap\_hana\_system\_password | The SAP HANA SYSTEM password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters with at least 1 number. | `string` | `""` | no | | sap\_hana\_system\_password\_secret | The secret key used to retrieve the SAP HANA SYSTEM login from Secret Manager (https://cloud.google.com/secret-manager). The Secret Manager password will overwrite the clear text password from sap\_hana\_system\_password if both are set. | `string` | `""` | no | -| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s BUILD.TERRA_SH_URL/sap_hana/startup_secondary.sh | bash -s BUILD.TERRA_SH_URL"` | no | +| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | | service\_account | OPTIONAL - Ability to define a custom service account instead of using the default project service account. | `string` | `""` | no | +| shared\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| shared\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine\_type. | `number` | `null` | no | +| shared\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| shared\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the shared disk. | `string` | `""` | no | | subnetwork | The sub network to deploy the instance in. | `string` | n/a | yes | +| unified\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| unified\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. 
Overrides the default size for the primary's unified disk, that is based off of the machine\_type. | `number` | `null` | no |
+| unified\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| unified\_worker\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| unified\_worker\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the unified worker disk(s), that is based off of the machine\_type. | `number` | `null` | no |
+| unified\_worker\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| use\_single\_shared\_data\_log\_disk | Optional - By default three separate disks for data, logs, and shared will be made. If set to true, one disk will be used instead. | `bool` | `false` | no |
+| usrsap\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| usrsap\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine\_type. | `number` | `null` | no |
+| usrsap\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no |
+| usrsap\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the /usr/sap disk. | `string` | `""` | no |
+| vm\_static\_ip | Optional - Defines an internal static IP for the VM. | `string` | `""` | no |
+| worker\_static\_ips | Optional - Defines internal static IP addresses for the worker nodes. | `list(string)` | `[]` | no |
 | zone | Zone where the instances will be created. | `string` | n/a | yes |
 
 ## Outputs
diff --git a/modules/sap_hana/main.tf b/modules/sap_hana/main.tf
index 5cb90467..96e1323c 100644
--- a/modules/sap_hana/main.tf
+++ b/modules/sap_hana/main.tf
@@ -13,12 +13,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
*/ - # # Terraform SAP HANA for Google Cloud # -# Version: BUILD.VERSION -# Build Hash: BUILD.HASH +# Version: 2.0.202404101403 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc # ################################################################################ @@ -26,79 +25,209 @@ ################################################################################ locals { mem_size_map = { - "n1-highmem-32" = 208 - "n1-highmem-64" = 416 - "n1-highmem-96" = 624 - "n1-megamem-96" = 1433 - "n2-highmem-32" = 256 - "n2-highmem-48" = 386 - "n2-highmem-64" = 512 - "n2-highmem-80" = 640 - "n2-highmem-96" = 768 - "n2-highmem-128" = 864 - "n1-ultramem-40" = 961 - "n1-ultramem-80" = 1922 - "n1-ultramem-160" = 3844 - "m1-megamem-96" = 1433 - "m1-ultramem-40" = 961 - "m1-ultramem-80" = 1922 - "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5916 - "m2-megamem-416" = 5916 - "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11832 - "m3-megamem-64" = 976 - "m3-megamem-128" = 1952 - "m3-ultramem-32" = 976 - "m3-ultramem-64" = 1952 - "m3-ultramem-128" = 3904 + "n1-highmem-32" = 208 + "n1-highmem-64" = 416 + "n1-highmem-96" = 624 + "n1-megamem-96" = 1433 + "n2-highmem-32" = 256 + "n2-highmem-48" = 384 + "n2-highmem-64" = 512 + "n2-highmem-80" = 640 + "n2-highmem-96" = 768 + "n2-highmem-128" = 864 + "n1-ultramem-40" = 961 + "n1-ultramem-80" = 1922 + "n1-ultramem-160" = 3844 + "m1-megamem-96" = 1433 + "m1-ultramem-40" = 961 + "m1-ultramem-80" = 1922 + "m1-ultramem-160" = 3844 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 + "m2-hypermem-416" = 8832 + "m2-ultramem-416" = 11744 + "m3-megamem-64" = 976 + "m3-megamem-128" = 1952 + "m3-ultramem-32" = 976 + "m3-ultramem-64" = 1952 + "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 + "c3-standard-192-metal" = 768 + "c3-highcpu-192-metal" = 512 + "c3-highmem-192-metal" = 1536 + "x4-megamem-960-metal" = 16384 + "x4-megamem-1440-metal" = 24576 + "x4-megamem-1920-metal" = 32768 } + cpu_platform_map = { - "n1-standard-16" = "Intel Haswell" - "n1-highmem-32" = "Intel Broadwell" - "n1-highmem-64" = "Intel Broadwell" - "n1-highmem-96" = "Intel Skylake" - "n1-megamem-96" = "Intel Skylake" - "n2-highmem-32" = "Automatic" - "n2-highmem-48" = "Automatic" - "n2-highmem-64" = "Automatic" - "n2-highmem-80" = "Automatic" - "n2-highmem-96" = "Automatic" - "n2-highmem-128" = "Automatic" - "n1-ultramem-40" = "Automatic" - "n1-ultramem-80" = "Automatic" - "n1-ultramem-160" = "Automatic" - "m1-megamem-96" = "Intel Skylake" - "m1-ultramem-40" = "Automatic" - "m1-ultramem-80" = "Automatic" - "m1-ultramem-160" = "Automatic" - "m2-ultramem-208" = "Automatic" - "m2-megamem-416" = "Automatic" - "m2-hypermem-416" = "Automatic" - "m2-ultramem-416" = "Automatic" - "m3-megamem-64" = "Automatic" - "m3-megamem-128" = "Automatic" - "m3-ultramem-32" = "Automatic" - "m3-ultramem-64" = "Automatic" - "m3-ultramem-128" = "Automatic" + "n1-highmem-32" = "Intel Broadwell" + "n1-highmem-64" = "Intel Broadwell" + "n1-highmem-96" = "Intel Skylake" + "n1-megamem-96" = "Intel Skylake" + "n2-highmem-32" = "Automatic" + "n2-highmem-48" = "Automatic" + "n2-highmem-64" = "Automatic" + "n2-highmem-80" = "Automatic" + "n2-highmem-96" = "Automatic" + "n2-highmem-128" = "Automatic" + "n1-ultramem-40" = "Automatic" + "n1-ultramem-80" = "Automatic" + "n1-ultramem-160" = "Automatic" + "m1-megamem-96" = "Intel Skylake" + "m1-ultramem-40" = "Automatic" + "m1-ultramem-80" = "Automatic" + "m1-ultramem-160" = "Automatic" + "m2-ultramem-208" = 
"Automatic" + "m2-megamem-416" = "Automatic" + "m2-hypermem-416" = "Automatic" + "m2-ultramem-416" = "Automatic" + "m3-megamem-64" = "Automatic" + "m3-megamem-128" = "Automatic" + "m3-ultramem-32" = "Automatic" + "m3-ultramem-64" = "Automatic" + "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + "c3-standard-192-metal" = "Automatic" + "c3-highcpu-192-metal" = "Automatic" + "c3-highmem-192-metal" = "Automatic" + "x4-megamem-960-metal" = "Automatic" + "x4-megamem-1440-metal" = "Automatic" + "x4-megamem-1920-metal" = "Automatic" } - mem_size = lookup(local.mem_size_map, var.machine_type, 320) - hana_log_size_min = min(512, max(64, local.mem_size / 2)) - hana_data_size_min = local.mem_size * 12 / 10 - hana_shared_size_min = min(1024, local.mem_size) - hana_log_size = local.hana_log_size_min - hana_data_size = local.hana_data_size_min + native_bm = length(regexall("metal", var.machine_type)) > 0 + + # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. + # All 'over provisioned' capacity is to go onto the data disk. + final_disk_type = var.disk_type == "" ? (local.native_bm ? "hyperdisk-extreme" : "pd-ssd") : var.disk_type + min_total_disk_map = { + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 + "hyperdisk-balanced" = 0 + "hyperdisk-extreme" = 0 + } + min_total_disk = local.min_total_disk_map[local.final_disk_type] - # scaleout_nodes > 0 then hana_shared_size and pdhdd is changed; assumes that sap_hana_scaleout_nodes is an interger - hana_shared_size = local.hana_shared_size_min * (var.sap_hana_scaleout_nodes > 0 ? ceil(var.sap_hana_scaleout_nodes / 4) : 1) - pdhdd_size_default = var.sap_hana_scaleout_nodes > 0 ? 2 * local.mem_size * (var.sap_hana_scaleout_nodes + 1) : 500 + mem_size = lookup(local.mem_size_map, var.machine_type, 320) + hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) + hana_data_size_min = ceil(local.mem_size * 12 / 10) + hana_shared_size_min = min(1024, local.mem_size) + hana_usrsap_size = 32 + + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_usrsap_size - local.hana_log_size - local.hana_shared_size_min) + + # scaleout_nodes > 0 then hana_shared_size and backup is changed; assumes that sap_hana_scaleout_nodes is an integer + hana_shared_size = var.sap_hana_scaleout_nodes > 0 ? local.hana_shared_size_min * ceil(var.sap_hana_scaleout_nodes / 4) : local.hana_shared_size_min + backup_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size * (var.sap_hana_scaleout_nodes + 1) + + # ensure the combined disk meets minimum size/performance ; + pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_shared_size + local.hana_usrsap_size + 1)) + + # ensure pd-hdd for backup is smaller than the maximum pd size + pd_size_worker = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_usrsap_size + 1)) + + unified_pd_size = var.unified_disk_size_override == null ? local.pd_size : var.unified_disk_size_override + unified_worker_pd_size = var.unified_worker_disk_size_override == null ? local.pd_size_worker : var.unified_worker_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? 
local.hana_log_size : var.log_disk_size_override + shared_pd_size = var.shared_disk_size_override == null ? local.hana_shared_size : var.shared_disk_size_override + usrsap_pd_size = var.usrsap_disk_size_override == null ? local.hana_usrsap_size : var.usrsap_disk_size_override + + # Disk types + final_data_disk_type = var.data_disk_type_override == "" ? local.final_disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? local.final_disk_type : var.log_disk_type_override + temp_shared_disk_type = local.native_bm ? "hyperdisk-balanced" : (contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], local.final_disk_type) ? "pd-balanced" : local.final_disk_type) + temp_usrsap_disk_type = local.native_bm ? "hyperdisk-balanced" : (contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], local.final_disk_type) ? "pd-balanced" : local.final_disk_type) + final_shared_disk_type = var.shared_disk_type_override == "" ? local.temp_shared_disk_type : var.shared_disk_type_override + final_usrsap_disk_type = var.usrsap_disk_type_override == "" ? local.temp_usrsap_disk_type : var.usrsap_disk_type_override + final_backup_disk_type = var.backup_disk_type == "" ? (local.native_bm ? "hyperdisk-balanced" : "pd-balanced") : var.backup_disk_type + + # Disk IOPS + hdx_iops_map = { + "data" = max(10000, local.data_pd_size * 2) + "log" = max(10000, local.log_pd_size * 2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "worker" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "backup" = max(10000, 2 * local.backup_size) + } + hdb_iops_map = { + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default + "backup" = var.hyperdisk_balanced_iops_default + } + null_iops_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + "backup" = null + } + iops_map = { + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map + "hyperdisk-balanced" = local.hdb_iops_map + "hyperdisk-extreme" = local.hdx_iops_map + } - # ensure pd-ssd meets minimum size/performance ; 32 is the min allowed memery and + 1 is there to make sure no undersizing happens - pdssd_size = ceil(max(834, local.hana_log_size + local.hana_data_size + local.hana_shared_size + 32 + 1)) + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_shared_iops = var.shared_disk_iops_override == null ? local.iops_map[local.final_shared_disk_type]["shared"] : var.shared_disk_iops_override + final_usrsap_iops = var.usrsap_disk_iops_override == null ? local.iops_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[local.final_disk_type]["unified"] : var.unified_disk_iops_override + final_unified_worker_iops = var.unified_worker_disk_iops_override == null ? local.iops_map[local.final_disk_type]["worker"] : var.unified_worker_disk_iops_override + final_backup_iops = var.backup_disk_iops_override == null ? 
local.iops_map[local.final_backup_disk_type]["backup"] : var.backup_disk_iops_override + + # Disk throughput MB/s + hdb_throughput_map = { + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_throughput_default + "worker" = var.hyperdisk_balanced_throughput_default + "backup" = var.hyperdisk_balanced_throughput_default + } + null_throughput_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + "backup" = null + } + throughput_map = { + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map + "hyperdisk-balanced" = local.hdb_throughput_map + "hyperdisk-extreme" = local.null_throughput_map + } - # change PD-HDD size if a custom backup size has been set - pdhdd_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : local.pdhdd_size_default + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_shared_throughput = var.shared_disk_throughput_override == null ? local.throughput_map[local.final_shared_disk_type]["shared"] : var.shared_disk_throughput_override + final_usrsap_throughput = var.usrsap_disk_throughput_override == null ? local.throughput_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[local.final_disk_type]["unified"] : var.unified_disk_throughput_override + final_unified_worker_throughput = var.unified_worker_disk_throughput_override == null ? local.throughput_map[local.final_disk_type]["worker"] : var.unified_worker_disk_throughput_override + final_backup_throughput = var.backup_disk_throughput_override == null ? local.throughput_map[local.final_backup_disk_type]["backup"] : var.backup_disk_throughput_override # network config variables zone_split = split("-", var.zone) @@ -109,6 +238,44 @@ locals { "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}") primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url + + has_shared_nfs = !(var.sap_hana_shared_nfs == "" && var.sap_hana_shared_nfs_resource == null) + make_shared_disk = !var.use_single_shared_data_log_disk && !local.has_shared_nfs + + use_backup_disk = (var.include_backup_disk && var.sap_hana_backup_nfs == "" && var.sap_hana_backup_nfs_resource == null) + + both_backup_nfs_defined = (var.sap_hana_backup_nfs != "") && var.sap_hana_backup_nfs_resource != null + both_shared_nfs_defined = (var.sap_hana_shared_nfs != "") && var.sap_hana_shared_nfs_resource != null + + backup_nfs_endpoint = var.sap_hana_backup_nfs_resource == null ? var.sap_hana_backup_nfs : "${var.sap_hana_backup_nfs_resource.networks[0].ip_addresses[0]}:/${var.sap_hana_backup_nfs_resource.file_shares[0].name}" + shared_nfs_endpoint = var.sap_hana_shared_nfs_resource == null ? 
var.sap_hana_shared_nfs : "${var.sap_hana_shared_nfs_resource.networks[0].ip_addresses[0]}:/${var.sap_hana_shared_nfs_resource.file_shares[0].name}" + +} + +# tflint-ignore: terraform_unused_declarations +data "assert_test" "one_backup" { + test = local.use_backup_disk || !local.both_backup_nfs_defined + throw = "Either use a disk for /backup (include_backup_disk) or use NFS. If using an NFS as /backup then only either sap_hana_backup_nfs or sap_hana_backup_nfs_resource may be defined." +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "one_shared" { + test = !local.both_shared_nfs_defined + throw = "If using an NFS as /shared then only either sap_hana_shared_nfs or sap_hana_shared_nfs_resource may be defined." +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "both_or_neither_nfs" { + test = (local.backup_nfs_endpoint == "") == (local.shared_nfs_endpoint == "") + throw = "If either NFS is defined, then both /shared and /backup must be defined." +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "hyperdisk_with_native_bm" { + test = local.native_bm ? length(regexall("hyperdisk", local.final_disk_type)) > 0 : true + throw = "Native bare metal machines only work with hyperdisks. Set 'disk_type' accordingly, e.g. 'disk_type = hyperdisk-balanced'" +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "backup_hyperdisk_with_native_bm" { + test = local.native_bm && local.use_backup_disk ? (length(regexall("hyperdisk", local.final_backup_disk_type)) > 0) : true + throw = "Native bare metal machines only work with hyperdisks. Set 'backup_disk_type' accordingly, e.g. 'backup_disk_type = hyperdisk-balanced'" } ################################################################################ @@ -118,7 +285,7 @@ locals { resource "google_compute_disk" "sap_hana_boot_disks" { count = var.sap_hana_scaleout_nodes + 1 name = format("${var.instance_name}-boot%05d", count.index + 1) - type = "pd-standard" + type = local.native_bm ? "hyperdisk-balanced" : "pd-balanced" zone = var.zone size = 30 # GB project = var.project_id @@ -132,22 +299,80 @@ resource "google_compute_disk" "sap_hana_boot_disks" { } } -resource "google_compute_disk" "sap_hana_pdssd_disks" { - count = var.sap_hana_scaleout_nodes + 1 - # TODO(b/202736714): check if name is correct - name = format("${var.instance_name}-pdssd%05d", count.index + 1) - type = "pd-ssd" - zone = var.zone - size = local.pdssd_size - project = var.project_id +resource "google_compute_disk" "sap_hana_unified_disks" { + count = var.use_single_shared_data_log_disk ? 1 : 0 + name = format("${var.instance_name}-hana") + type = local.final_disk_type + zone = var.zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops + provisioned_throughput = local.final_unified_throughput } +resource "google_compute_disk" "sap_hana_unified_worker_disks" { + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes : 0 + name = format("${var.instance_name}-hana%05d", count.index + 1) + type = local.final_disk_type + zone = var.zone + size = local.unified_worker_pd_size + project = var.project_id + provisioned_iops = local.final_unified_worker_iops + provisioned_throughput = local.final_unified_worker_throughput +} + +# Split data/log/sap disks +resource "google_compute_disk" "sap_hana_data_disks" { + count = var.use_single_shared_data_log_disk ? 
0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-data%05d", count.index + 1) + type = local.final_data_disk_type + zone = var.zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops + provisioned_throughput = local.final_data_throughput +} + +resource "google_compute_disk" "sap_hana_log_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-log%05d", count.index + 1) + type = local.final_log_disk_type + zone = var.zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops + provisioned_throughput = local.final_log_throughput +} +resource "google_compute_disk" "sap_hana_shared_disk" { + count = local.make_shared_disk ? 1 : 0 + name = format("${var.instance_name}-shared%05d", count.index + 1) + type = local.final_shared_disk_type + zone = var.zone + size = local.shared_pd_size + project = var.project_id + provisioned_iops = local.final_shared_iops + provisioned_throughput = local.final_shared_throughput +} +resource "google_compute_disk" "sap_hana_usrsap_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = format("${var.instance_name}-usrsap%05d", count.index + 1) + type = local.final_usrsap_disk_type + zone = var.zone + size = local.usrsap_pd_size + project = var.project_id + provisioned_iops = local.final_usrsap_iops + provisioned_throughput = local.final_usrsap_throughput +} + + resource "google_compute_disk" "sap_hana_backup_disk" { - # TODO(b/202736714): check if name is correct - name = "${var.instance_name}-backup" - type = "pd-standard" - zone = var.zone - size = local.pdhdd_size - project = var.project_id + count = local.use_backup_disk ? 1 : 0 + name = "${var.instance_name}-backup" + type = local.final_backup_disk_type + zone = var.zone + size = local.backup_size + project = var.project_id + provisioned_iops = local.final_backup_iops + provisioned_throughput = local.final_backup_throughput } ################################################################################ @@ -160,6 +385,7 @@ resource "google_compute_address" "sap_hana_vm_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = var.vm_static_ip } resource "google_compute_address" "sap_hana_worker_ip" { @@ -169,6 +395,7 @@ resource "google_compute_address" "sap_hana_worker_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = length(var.worker_static_ips) > count.index ? var.worker_static_ips[count.index] : "" } ################################################################################ @@ -187,14 +414,55 @@ resource "google_compute_instance" "sap_hana_primary_instance" { source = google_compute_disk.sap_hana_boot_disks[0].self_link } - attached_disk { - device_name = google_compute_disk.sap_hana_pdssd_disks[0].name - source = google_compute_disk.sap_hana_pdssd_disks[0].self_link + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } } - attached_disk { - device_name = google_compute_disk.sap_hana_backup_disk.name - source = google_compute_disk.sap_hana_backup_disk.self_link + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? 
[1] : [] + content { + device_name = google_compute_disk.sap_hana_unified_disks[0].name + source = google_compute_disk.sap_hana_unified_disks[0].self_link + } + } + + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_data_disks[0].name + source = google_compute_disk.sap_hana_data_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_log_disks[0].name + source = google_compute_disk.sap_hana_log_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = length(google_compute_disk.sap_hana_shared_disk) > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_shared_disk[0].name + source = google_compute_disk.sap_hana_shared_disk[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_usrsap_disks[0].name + source = google_compute_disk.sap_hana_usrsap_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = length(google_compute_disk.sap_hana_backup_disk) > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_backup_disk[0].name + source = google_compute_disk.sap_hana_backup_disk[0].self_link + } } can_ip_forward = var.can_ip_forward @@ -202,6 +470,7 @@ resource "google_compute_instance" "sap_hana_primary_instance" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_vm_ip.address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -221,6 +490,7 @@ resource "google_compute_instance" "sap_hana_primary_instance" { ] } + dynamic "reservation_affinity" { for_each = length(var.reservation_name) > 1 ? [1] : [] content { @@ -241,12 +511,19 @@ resource "google_compute_instance" "sap_hana_primary_instance" { sap_hana_instance_number = var.sap_hana_instance_number sap_hana_sidadm_password = var.sap_hana_sidadm_password sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret - # wording on system_password may be inconsitent with DM sap_hana_system_password = var.sap_hana_system_password sap_hana_system_password_secret = var.sap_hana_system_password_secret sap_hana_sidadm_uid = var.sap_hana_sidadm_uid sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_hana_shared_nfs = local.shared_nfs_endpoint + sap_hana_backup_nfs = local.backup_nfs_endpoint sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = local.use_backup_disk + sap_hana_shared_disk = local.make_shared_disk + sap_hana_data_disk_type = local.final_data_disk_type + enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" } @@ -271,9 +548,41 @@ resource "google_compute_instance" "sap_hana_worker_instances" { source = google_compute_disk.sap_hana_boot_disks[count.index + 1].self_link } - attached_disk { - device_name = google_compute_disk.sap_hana_pdssd_disks[count.index + 1].name - source = google_compute_disk.sap_hana_pdssd_disks[count.index + 1].self_link + dynamic "scheduling" { + for_each = local.native_bm ? 
[1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_unified_worker_disks[count.index].name + source = google_compute_disk.sap_hana_unified_worker_disks[count.index].self_link + } + } + + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_data_disks[count.index + 1].name + source = google_compute_disk.sap_hana_data_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_log_disks[count.index + 1].name + source = google_compute_disk.sap_hana_log_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_usrsap_disks[count.index + 1].name + source = google_compute_disk.sap_hana_usrsap_disks[count.index + 1].self_link + } } can_ip_forward = var.can_ip_forward @@ -281,6 +590,7 @@ resource "google_compute_instance" "sap_hana_worker_instances" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_worker_ip[count.index].address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -320,12 +630,18 @@ resource "google_compute_instance" "sap_hana_worker_instances" { sap_hana_instance_number = var.sap_hana_instance_number sap_hana_sidadm_password = var.sap_hana_sidadm_password sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret - # wording on system_password may be inconsitent with DM sap_hana_system_password = var.sap_hana_system_password sap_hana_system_password_secret = var.sap_hana_system_password_secret sap_hana_sidadm_uid = var.sap_hana_sidadm_uid sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_hana_shared_nfs = local.shared_nfs_endpoint + sap_hana_backup_nfs = local.backup_nfs_endpoint sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = false + sap_hana_shared_disk = false + enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" } diff --git a/modules/sap_hana/outputs.tf b/modules/sap_hana/outputs.tf index c3c52e8c..a19bf449 100644 --- a/modules/sap_hana/outputs.tf +++ b/modules/sap_hana/outputs.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - output "sap_hana_primary_self_link" { description = "SAP HANA self-link for the primary instance created" value = google_compute_instance.sap_hana_primary_instance.self_link diff --git a/modules/sap_hana/variables.tf b/modules/sap_hana/variables.tf index 7b317ef8..f1cd9514 100644 --- a/modules/sap_hana/variables.tf +++ b/modules/sap_hana/variables.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - variable "project_id" { type = string description = "Project id where the instances will be created." 
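
The hunks that follow introduce the NFS-based alternatives to the shared and backup disks. As a quick orientation for reviewers, here is a minimal caller-side sketch of the new inputs; the module source and version are taken from the usage block this change removes from the README, and the endpoint values are illustrative placeholders in the `IP:/share` form required by the validations below:

```hcl
# Hypothetical caller configuration (not part of this change); all values are
# placeholders. Per the assert_test checks added in main.tf, the shared and
# backup NFS endpoints must either both be set or both be left empty.
module "sap_hana" {
  source  = "terraform-google-modules/sap/google//modules/sap_hana"
  version = "~> 1.1"

  project_id          = "my-project-x"
  zone                = "us-east1-b"
  machine_type        = "n1-highmem-32"
  subnetwork          = "default"
  linux_image         = "rhel-8-4-sap-ha"
  linux_image_project = "rhel-sap-cloud"
  instance_name       = "hana-instance"
  sap_hana_sid        = "ABC"

  # NFS endpoints used in place of the shared and backup disks; with these
  # set, no shared or backup persistent disk is created.
  sap_hana_shared_nfs = "10.10.10.2:/shared"
  sap_hana_backup_nfs = "10.10.10.2:/backup"
}
```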
@@ -55,8 +54,12 @@ variable "instance_name" {
 
 variable "sap_hana_deployment_bucket" {
   type        = string
-  description = "The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed."
-  default     = ""
+  description = "The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed."
+  validation {
+    condition     = (!(length(regexall("gs:", var.sap_hana_deployment_bucket)) > 0))
+    error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://."
+  }
+  default = ""
 }
 
 variable "sap_hana_sid" {
@@ -74,7 +77,7 @@ variable "sap_hana_instance_number" {
   default     = 0
   validation {
     condition     = (var.sap_hana_instance_number >= 0) && (var.sap_hana_instance_number < 100)
-    error_message = "The sap_hana_instance_number must be 2 digits long."
+    error_message = "The sap_hana_instance_number must be a number between 0 and 99."
   }
 }
 
@@ -127,10 +130,64 @@ variable "sap_hana_scaleout_nodes" {
     error_message = "The sap_hana_scaleout_nodes must be positive or 0."
   }
 }
+variable "sap_hana_backup_nfs_resource" {
+  default = null
+  type = object({
+    networks = list(
+      object({
+        ip_addresses = list(string)
+      })
+    )
+    file_shares = list(
+      object({
+        name = string
+      })
+    )
+
+  })
+  description = "NFS resource to be used as the backup drive instead of a disk. This and sap_hana_backup_nfs may not both be set."
+}
+variable "sap_hana_shared_nfs_resource" {
+  default = null
+  type = object({
+    networks = list(
+      object({
+        ip_addresses = list(string)
+      })
+    )
+    file_shares = list(
+      object({
+        name = string
+      })
+    )
+
+  })
+  description = "NFS resource to be used as the shared drive instead of a disk. This and sap_hana_shared_nfs may not both be set."
+}
+
+variable "sap_hana_shared_nfs" {
+  type    = string
+  default = ""
+  validation {
+    condition     = var.sap_hana_shared_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_shared_nfs))
+    error_message = "The sap_hana_shared_nfs must be an IP address followed by ':/' then some name."
+  }
+  description = "NFS endpoint for /hana/shared storage."
+}
+
+variable "sap_hana_backup_nfs" {
+  type    = string
+  default = ""
+  validation {
+    condition     = var.sap_hana_backup_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_backup_nfs))
+    error_message = "The sap_hana_backup_nfs must be an IP address followed by ':/' then some name."
+  }
+  description = "NFS endpoint for /hanabackup storage."
+}
 
 variable "sap_hana_backup_size" {
   type        = number
-  description = "Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory."
+  description = "Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory. If sap_hana_backup_nfs is set, this setting is ignored."
variable "sap_hana_backup_size" { type = number - description = "Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory." + description = "Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory. If sap_hana_backup_nfs is set, this setting is ignored." default = 0 validation { condition = var.sap_hana_backup_size >= 0 @@ -199,18 +256,239 @@ variable "post_deployment_script" { default = "" } +variable "nic_type" { + type = string + description = "Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking." + validation { + condition = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type) + error_message = "The nic_type must be either GVNIC or VIRTIO_NET." + } + default = "" +} + +variable "disk_type" { + type = string + description = "Optional - The default disk type to use for disk(s) containing log and data volumes. The default is hyperdisk-extreme for native bare metal machines and pd-ssd otherwise. Not all disk types are supported on all machine types - see https://cloud.google.com/compute/docs/disks/ for details." + validation { + condition = contains(["", "pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) + error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} + +variable "use_single_shared_data_log_disk" { + type = bool + description = "Optional - By default three separate disks for data, logs, and shared will be made. If set to true, one disk will be used instead." + default = false +} + +variable "include_backup_disk" { + type = bool + description = "Optional - The default is true. If set to true, creates a disk for backups." + default = true +} + +variable "backup_disk_type" { + type = string + description = "Optional - The default is hyperdisk-balanced for native bare metal machines and pd-balanced otherwise, only used if a backup disk is needed." + validation { + condition = contains(["", "pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.backup_disk_type) + error_message = "The backup_disk_type must be either pd-ssd, pd-balanced, pd-standard, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} + +variable "vm_static_ip" { + type = string + description = "Optional - Defines an internal static IP for the VM." + validation { + condition = var.vm_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.vm_static_ip)) + error_message = "The vm_static_ip must be a valid IP address." + } + default = "" +} + +variable "worker_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the worker nodes." + validation { + condition = alltrue([ + for ip in var.worker_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All worker_static_ips must be valid IP addresses." + } + default = [] +} + +variable "enable_fast_restart" { + type = bool + description = "Optional - The default is true. If set to true, enables HANA Fast Restart." + default = true +} + +variable "hyperdisk_balanced_iops_default" { + type = number + description = "Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." + default = 3000 +} + +variable "hyperdisk_balanced_throughput_default" { + type = number + description = "Optional - default is 750. 
Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." + default = 750 +} + # -# DO NOT MODIFY unless you know what you are doing +# DO NOT MODIFY unless instructed or aware of the implications of using those settings # + +variable "data_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) + error_message = "The data_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "log_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) + error_message = "The log_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "shared_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the shared disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.shared_disk_type_override) + error_message = "The shared_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "usrsap_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the /usr/sap disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.usrsap_disk_type_override) + error_message = "The usrsap_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} + +variable "unified_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary's unified disk, that is based off of the machine_type." + default = null +} +variable "unified_worker_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the unified worker disk(s), that is based off of the machine_type." + default = null +} +variable "data_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." + default = null +} +variable "log_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine_type." 
+ default = null +} +variable "shared_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine_type." + default = null +} +variable "usrsap_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine_type." + default = null +} + +variable "unified_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "unified_worker_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "shared_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "usrsap_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "backup_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "unified_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "unified_worker_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the unified worker disk(s) will use. Has no effect if not using a disk type that supports it." 
+ default = null +} +variable "data_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "shared_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "usrsap_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "backup_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." - default = "curl -s BUILD.TERRA_SH_URL/sap_hana/startup.sh | bash -s BUILD.TERRA_SH_URL" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s BUILD.TERRA_SH_URL/sap_hana/startup_secondary.sh | bash -s BUILD.TERRA_SH_URL" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } diff --git a/modules/sap_hana/versions.tf b/modules/sap_hana/versions.tf index 362db4b4..45135c87 100644 --- a/modules/sap_hana/versions.tf +++ b/modules/sap_hana/versions.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - terraform { required_version = ">=0.12.6" required_providers { @@ -21,6 +20,10 @@ terraform { source = "hashicorp/google" version = ">= 4.0.0, < 6" } + assert = { + source = "bwoznicki/assert" + version = "0.0.1" + } } provider_meta "google" { diff --git a/modules/sap_hana_ha/README.md b/modules/sap_hana_ha/README.md index 8bedeab5..d27aae7e 100644 --- a/modules/sap_hana_ha/README.md +++ b/modules/sap_hana_ha/README.md @@ -1,65 +1,91 @@ -# terraform-google-sap for SAP HANA HA +# Terraform for SAP HANA HA for Google Cloud -This module is meant to create SAP HANA HA instance(s) for Google Cloud +This template follows the documented steps +https://cloud.google.com/solutions/sap/docs/certifications-sap-hana and deploys +GCP and Pacemaker resources up to the installation of SAP's central services. -The resources/services/activations/deletions that this module will create/trigger are: +## Set up Terraform -- A set of compute engine instances, primary and secondary (if specified) -- A set of compute disks -- IP addresses for the instances to use -- Primary and secondary (if specified) GCE instance groups +Install Terraform on the machine you would like to deploy from by +following +https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform -## Usage +## How to deploy -Basic usage of this module is as follows: +1. Download the .tf file into an empty directory: `curl + https://storage.googleapis.com/cloudsapdeploy/terraform/latest/terraform/sap_hana_ha/terraform/sap_hana_ha.tf + -o sap_hana_ha.tf` -```hcl -module "sap_hana_ha" { - source = "terraform-google-modules/sap/google//modules/sap_hana_ha" - version = "~> 1.1" +2. Fill in the mandatory variables and, if desired, any optional variables in the .tf + file. - project_id = "PROJECT_ID" # example: my-project-x - machine_type = "MACHINE_TYPE" # example: n1-highmem-32 - network = "NETWORK" # example: default - subnetwork = "SUBNETWORK" # example: default - linux_image = "LINUX_IMAGE" # example: rhel-8-4-sap-ha - linux_image_project = "LINUX_IMAGE_PROJECT" # example: rhel-sap-cloud - primary_instance_name = "PRIMARY_NAME" # example: hana-ha-primary - primary_zone = "PRIMARY_ZONE" # example: us-east1-b, must be in the same region as secondary_zone - secondary_instance_name = "SECONDARY_NAME" # example: hana-ha-secondary - secondary_zone = "SECONDARY_ZONE" # example: us-east1-c, must be in the same region as primary_zone -} -``` +3. Deploy -Functional example is included in the -[examples](../../examples/sap_hana_ha_simple) directory. + 1. Run `terraform init` (only needed once) + 2. Run `terraform plan` to see what is going to be deployed. Verify that + names, zones, sizes, etc. are as desired. + 3. Run `terraform apply` to deploy the resources + 4. Run `terraform destroy` to remove the resources + +4. Continue installation of SAP software and setup of remaining cluster + resources as per documentation at + https://cloud.google.com/solutions/sap/docs/sap-hana-ha-tf-deployment + +## Additional information + +For additional information see https://www.terraform.io/docs/index.html and +https://cloud.google.com/docs/terraform
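With the old Usage snippet removed above, it may still help to picture what a filled-in configuration for step 2 looks like. A minimal sketch assembled from the placeholder values in the removed block (every value here is an example to replace with your own; the `~> 1.1` version constraint comes from the old snippet and may need updating for this release):

```hcl
module "sap_hana_ha" {
  source  = "terraform-google-modules/sap/google//modules/sap_hana_ha"
  version = "~> 1.1"

  project_id              = "my-project-x"
  machine_type            = "n1-highmem-32"
  network                 = "default"
  subnetwork              = "default"
  linux_image             = "rhel-8-4-sap-ha"
  linux_image_project     = "rhel-sap-cloud"
  primary_instance_name   = "hana-ha-primary"
  primary_zone            = "us-east1-b" # must be in the same region as secondary_zone
  secondary_instance_name = "hana-ha-secondary"
  secondary_zone          = "us-east1-c"
}
```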
## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| backup\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| backup\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| backup\_disk\_type | Optional - The default is hyperdisk-balanced for native bare metal machines and pd-balanced otherwise, only used if a backup disk is needed. | `string` | `""` | no | | can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no | +| data\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| data\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the data disk. | `string` | `""` | no | +| disk\_type | Optional - The default disk type to use for disk(s) containing log and data volumes. The default is hyperdisk-extreme for native bare metal machines and pd-ssd otherwise. Not all disk types are supported on all machine types - see https://cloud.google.com/compute/docs/disks/ for details. | `string` | `""` | no | +| enable\_fast\_restart | Optional - The default is true. If set to true, enables HANA Fast Restart. | `bool` | `true` | no | +| hyperdisk\_balanced\_iops\_default | Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks). | `number` | `3000` | no | +| hyperdisk\_balanced\_throughput\_default | Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks). | `number` | `750` | no | +| include\_backup\_disk | Optional - The default is true. If set to true, creates a disk for backups. | `bool` | `true` | no | | is\_work\_load\_management\_deployment | If set, the necessary tags and labels will be added to resources to support WLM. | `bool` | `false` | no | | linux\_image | Linux image name to use. | `string` | n/a | yes | | linux\_image\_project | The project which the Linux image belongs to. | `string` | n/a | yes | | loadbalancer\_name | OPTIONAL - Name of the load balancer that will be created. If left blank with use\_ilb\_vip set to true, then will use lb-SID as default | `string` | `""` | no | +| log\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. 
Overrides the default size for the log disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| log\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the log disk. | `string` | `""` | no | | machine\_type | Machine type for the instances. | `string` | n/a | yes | +| majority\_maker\_instance\_name | Optional - Name to use for the Majority Maker instance. Must be provided if scaleout\_nodes > 0. | `string` | `""` | no | +| majority\_maker\_machine\_type | Optional - The machine type to use for the Majority Maker instance. Must be provided if scaleout\_nodes > 0. | `string` | `""` | no | +| majority\_maker\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_majority_maker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| majority\_maker\_zone | Optional - The zone in which the Majority Maker instance will be deployed. Must be provided if scaleout\_nodes > 0. It is recommended for this to be different from the zones the primary and secondary instance are deployed in. | `string` | `""` | no | | network | Network in which the ILB resides including resources like firewall rules. | `string` | n/a | yes | | network\_tags | OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes. | `list(string)` | `[]` | no | +| nic\_type | Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO\_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking. | `string` | `""` | no | | post\_deployment\_script | OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment. | `string` | `""` | no | | primary\_instance\_group\_name | OPTIONAL - Unmanaged instance group to be created for the primary node. If blank, will use ig-VM\_NAME | `string` | `""` | no | | primary\_instance\_name | Hostname of the primary GCE instance. | `string` | n/a | yes | | primary\_reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | -| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform"` | no | +| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| primary\_static\_ip | Optional - Defines an internal static IP for the primary VM. | `string` | `""` | no | +| primary\_worker\_static\_ips | Optional - Defines internal static IP addresses for the primary worker nodes. | `list(string)` | `[]` | no | | primary\_zone | Zone where the primary instances will be created. | `string` | n/a | yes | | project\_id | Project id where the instances will be created. | `string` | n/a | yes | | public\_ip | OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail. | `bool` | `true` | no | | sap\_deployment\_debug | OPTIONAL - If this value is set to true, the deployment will generates verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging. | `bool` | `false` | no | | sap\_hana\_backup\_size | Size in GB of the /hanabackup volume. If this is not set or set to zero, the GCE instance will be provisioned with a hana backup volume of 2 times the total memory. | `number` | `0` | no | -| sap\_hana\_deployment\_bucket | The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | +| sap\_hana\_deployment\_bucket | The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | | sap\_hana\_instance\_number | The SAP instance number. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `number` | `0` | no | | sap\_hana\_sapsys\_gid | The Linux GID of the SAPSYS group. By default this is set to 79 | `number` | `79` | no | +| sap\_hana\_scaleout\_nodes | Optional - Specify to add scaleout nodes to both HA instances. | `number` | `0` | no | | sap\_hana\_sid | The SAP HANA SID. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. SID must adhere to SAP standard (Three letters or numbers and start with a letter) | `string` | `""` | no | | sap\_hana\_sidadm\_password | The linux sidadm login password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters. | `string` | `""` | no | | sap\_hana\_sidadm\_password\_secret | The secret key used to retrieve the linux sidadm login from Secret Manager (https://cloud.google.com/secret-manager). The Secret Manager password will overwrite the clear text password from sap\_hana\_sidadm\_password if both are set. 
| `string` | `""` | no | @@ -70,11 +96,26 @@ Functional example is included in the | secondary\_instance\_group\_name | OPTIONAL - Unmanaged instance group to be created for the secondary node. If blank, will use ig-VM\_NAME | `string` | `""` | no | | secondary\_instance\_name | Hostname of the secondary GCE instance. | `string` | n/a | yes | | secondary\_reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | -| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform"` | no | +| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | +| secondary\_static\_ip | Optional - Defines an internal static IP for the secondary VM. | `string` | `""` | no | +| secondary\_worker\_static\_ips | Optional - Defines internal static IP addresses for the secondary worker nodes. | `list(string)` | `[]` | no | | secondary\_zone | Zone where the secondary instances will be created. | `string` | n/a | yes | | service\_account | OPTIONAL - Ability to define a custom service account instead of using the default project service account. | `string` | `""` | no | +| shared\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| shared\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine\_type. | `number` | `null` | no | +| shared\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| shared\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the shared disk. | `string` | `""` | no | | subnetwork | The sub network to deploy the instance in. | `string` | n/a | yes | +| unified\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| unified\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| unified\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| use\_single\_shared\_data\_log\_disk | Optional - By default three separate disks for data, logs, and shared will be made. If set to true, one disk will be used instead. | `bool` | `false` | no | +| usrsap\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it. 
| `number` | `null` | no | +| usrsap\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| usrsap\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| usrsap\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the /usr/sap disk. | `string` | `""` | no | | wlm\_deployment\_name | Deployment name to be used for integrating into Work Load Management. | `string` | `""` | no | +| worker\_startup\_url | Startup script to be executed when the worker VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_worker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | ## Outputs @@ -83,7 +124,9 @@ Functional example is included in the | sap\_hana\_ha\_firewall\_link | Link to the optional firewall | | sap\_hana\_ha\_loadbalander\_link | Link to the optional load balancer | | sap\_hana\_ha\_primary\_instance\_self\_link | Self-link for the primary SAP HANA HA instance created. | +| sap\_hana\_ha\_primary\_worker\_self\_links | Self-links for the worker nodes in the primary SAP HANA HA instance. | | sap\_hana\_ha\_secondary\_instance\_self\_link | Self-link for the secondary SAP HANA HA instance created. | +| sap\_hana\_ha\_secondary\_worker\_self\_links | Self-links for the worker nodes in the secondary SAP HANA HA instance. | diff --git a/modules/sap_hana_ha/main.tf b/modules/sap_hana_ha/main.tf index 18d41cb9..64fd8749 100644 --- a/modules/sap_hana_ha/main.tf +++ b/modules/sap_hana_ha/main.tf @@ -13,12 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - # # Terraform SAP HANA HA for Google Cloud # -# Version: 2.0.2022101419281665775728 -# Build Hash: 5f4ef08feb4fed0e1eabc3bfc4b2d64d99001ae7 +# Version: 2.0.202404101403 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc # ################################################################################ @@ -26,69 +25,205 @@ ################################################################################ locals { mem_size_map = { - "n1-highmem-32" = 208 - "n1-highmem-64" = 416 - "n1-highmem-96" = 624 - "n1-megamem-96" = 1433 - "n2-highmem-32" = 256 - "n2-highmem-48" = 386 - "n2-highmem-64" = 512 - "n2-highmem-80" = 640 - "n2-highmem-96" = 768 - "n2-highmem-128" = 864 - "n1-ultramem-40" = 961 - "n1-ultramem-80" = 1922 - "n1-ultramem-160" = 3844 - "m1-megamem-96" = 1433 - "m1-ultramem-40" = 961 - "m1-ultramem-80" = 1922 - "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5916 - "m2-megamem-416" = 5916 - "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11832 + "n1-highmem-32" = 208 + "n1-highmem-64" = 416 + "n1-highmem-96" = 624 + "n1-megamem-96" = 1433 + "n2-highmem-32" = 256 + "n2-highmem-48" = 384 + "n2-highmem-64" = 512 + "n2-highmem-80" = 640 + "n2-highmem-96" = 768 + "n2-highmem-128" = 864 + "n1-ultramem-40" = 961 + "n1-ultramem-80" = 1922 + "n1-ultramem-160" = 3844 + "m1-megamem-96" = 1433 + "m1-ultramem-40" = 961 + "m1-ultramem-80" = 1922 + "m1-ultramem-160" = 3844 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 + "m2-hypermem-416" = 8832 + "m2-ultramem-416" = 11744 + "m3-megamem-64" = 976 + "m3-megamem-128" = 1952 + "m3-ultramem-32" = 976 + "m3-ultramem-64" = 1952 + "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 + "c3-standard-192-metal" = 768 + "c3-highcpu-192-metal" = 512 + "c3-highmem-192-metal" = 1536 + "x4-megamem-960-metal" = 16384 + "x4-megamem-1440-metal" = 24576 + "x4-megamem-1920-metal" = 32768 } cpu_platform_map = { - "n1-standard-16" = "Intel Haswell" - "n1-highmem-32" = "Intel Broadwell" - "n1-highmem-64" = "Intel Broadwell" - "n1-highmem-96" = "Intel Skylake" - "n1-megamem-96" = "Intel Skylake" - "n2-highmem-32" = "Automatic" - "n2-highmem-48" = "Automatic" - "n2-highmem-64" = "Automatic" - "n2-highmem-80" = "Automatic" - "n2-highmem-96" = "Automatic" - "n2-highmem-128" = "Automatic" - "n1-ultramem-40" = "Automatic" - "n1-ultramem-80" = "Automatic" - "n1-ultramem-160" = "Automatic" - "m1-megamem-96" = "Intel Skylake" - "m1-ultramem-40" = "Automatic" - "m1-ultramem-80" = "Automatic" - "m1-ultramem-160" = "Automatic" - "m2-ultramem-208" = "Automatic" - "m2-megamem-416" = "Automatic" - "m2-hypermem-416" = "Automatic" - "m2-ultramem-416" = "Automatic" - } - mem_size = lookup(local.mem_size_map, var.machine_type, 320) - hana_log_size_min = min(512, max(64, local.mem_size / 2)) - hana_data_size_min = local.mem_size * 12 / 10 - hana_shared_size_min = min(1024, local.mem_size) + "n1-highmem-32" = "Intel Broadwell" + "n1-highmem-64" = "Intel Broadwell" + "n1-highmem-96" = "Intel Skylake" + "n1-megamem-96" = "Intel Skylake" + "n2-highmem-32" = "Automatic" + "n2-highmem-48" = "Automatic" + "n2-highmem-64" = "Automatic" + "n2-highmem-80" = "Automatic" + "n2-highmem-96" = "Automatic" + "n2-highmem-128" = "Automatic" + "n1-ultramem-40" = "Automatic" + "n1-ultramem-80" = "Automatic" + "n1-ultramem-160" = "Automatic" + "m1-megamem-96" = "Intel Skylake" + "m1-ultramem-40" = "Automatic" + "m1-ultramem-80" = "Automatic" + "m1-ultramem-160" = "Automatic" + "m2-ultramem-208" = "Automatic" + "m2-megamem-416" 
= "Automatic" + "m2-hypermem-416" = "Automatic" + "m2-ultramem-416" = "Automatic" + "m3-megamem-64" = "Automatic" + "m3-megamem-128" = "Automatic" + "m3-ultramem-32" = "Automatic" + "m3-ultramem-64" = "Automatic" + "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + "c3-standard-192-metal" = "Automatic" + "c3-highcpu-192-metal" = "Automatic" + "c3-highmem-192-metal" = "Automatic" + "x4-megamem-960-metal" = "Automatic" + "x4-megamem-1440-metal" = "Automatic" + "x4-megamem-1920-metal" = "Automatic" + } + + native_bm = length(regexall("metal", var.machine_type)) > 0 + + # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. + # All 'over provisioned' capacity is to go onto the data disk. + final_disk_type = var.disk_type == "" ? (local.native_bm ? "hyperdisk-extreme" : "pd-ssd") : var.disk_type + min_total_disk_map = { + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 + "hyperdisk-balanced" = 0 + "hyperdisk-extreme" = 0 + } + min_total_disk = local.min_total_disk_map[local.final_disk_type] + + mem_size = lookup(local.mem_size_map, var.machine_type, 320) + hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) + hana_data_size_min = ceil(local.mem_size * 12 / 10) + hana_shared_size = min(1024, local.mem_size) + hana_usrsap_size = 32 default_boot_size = 30 - hana_log_size = local.hana_log_size_min - hana_data_size = local.hana_data_size_min + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_usrsap_size - local.hana_log_size - local.hana_shared_size) all_network_tag_items = concat(var.network_tags, ["sap-${local.healthcheck_name}-port"]) network_tags = local.all_network_tag_items - pdhdd_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size - # ensure pd-ssd meets minimum size/performance - pdssd_size = ceil(max(834, local.hana_log_size + local.hana_data_size + local.hana_shared_size_min + 32 + 1)) + # ensure the combined disk meets minimum size/performance + pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + local.hana_shared_size + local.hana_usrsap_size + 1)) + + unified_pd_size = var.unified_disk_size_override == null ? local.pd_size : var.unified_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override + shared_pd_size = var.shared_disk_size_override == null ? local.hana_shared_size : var.shared_disk_size_override + usrsap_pd_size = var.usrsap_disk_size_override == null ? local.hana_usrsap_size : var.usrsap_disk_size_override + backup_pd_size = var.sap_hana_backup_size > 0 ? var.sap_hana_backup_size : 2 * local.mem_size + + # Disk types + final_data_disk_type = var.data_disk_type_override == "" ? local.final_disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? local.final_disk_type : var.log_disk_type_override + temp_shared_disk_type = local.native_bm ? "hyperdisk-balanced" : (contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], local.final_disk_type) ? "pd-balanced" : local.final_disk_type) + temp_usrsap_disk_type = local.native_bm ? "hyperdisk-balanced" : (contains(["hyperdisk-extreme", "hyperdisk-balanced", "pd-extreme"], local.final_disk_type) ? 
"pd-balanced" : local.final_disk_type) + final_shared_disk_type = var.shared_disk_type_override == "" ? local.temp_shared_disk_type : var.shared_disk_type_override + final_usrsap_disk_type = var.usrsap_disk_type_override == "" ? local.temp_usrsap_disk_type : var.usrsap_disk_type_override + final_backup_disk_type = var.backup_disk_type == "" ? (local.native_bm ? "hyperdisk-balanced" : "pd-balanced") : var.backup_disk_type + + # Disk IOPS + hdx_iops_map = { + "data" = max(10000, local.data_pd_size * 2) + "log" = max(10000, local.log_pd_size * 2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "worker" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "backup" = max(10000, 2 * local.backup_pd_size) + } + hdb_iops_map = { + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default + "backup" = var.hyperdisk_balanced_iops_default + } + null_iops_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + "backup" = null + } + iops_map = { + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map + "hyperdisk-balanced" = local.hdb_iops_map + "hyperdisk-extreme" = local.hdx_iops_map + } + + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_shared_iops = var.shared_disk_iops_override == null ? local.iops_map[local.final_shared_disk_type]["shared"] : var.shared_disk_iops_override + final_usrsap_iops = var.usrsap_disk_iops_override == null ? local.iops_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[local.final_disk_type]["unified"] : var.unified_disk_iops_override + final_backup_iops = var.backup_disk_iops_override == null ? local.iops_map[local.final_backup_disk_type]["backup"] : var.backup_disk_iops_override + + # Disk throughput MB/s + hdb_throughput_map = { + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_throughput_default + "worker" = var.hyperdisk_balanced_throughput_default + "backup" = var.hyperdisk_balanced_throughput_default + } + null_throughput_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + "backup" = null + } + throughput_map = { + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map + "hyperdisk-balanced" = local.hdb_throughput_map + "hyperdisk-extreme" = local.null_throughput_map + } + + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_shared_throughput = var.shared_disk_throughput_override == null ? 
local.throughput_map[local.final_shared_disk_type]["shared"] : var.shared_disk_throughput_override + final_usrsap_throughput = var.usrsap_disk_throughput_override == null ? local.throughput_map[local.final_usrsap_disk_type]["usrsap"] : var.usrsap_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[local.final_disk_type]["unified"] : var.unified_disk_throughput_override + final_backup_throughput = var.backup_disk_throughput_override == null ? local.throughput_map[local.final_backup_disk_type]["backup"] : var.backup_disk_throughput_override sap_vip_solution = "ILB" sap_hc_port = 60000 + var.sap_hana_instance_number @@ -140,6 +275,53 @@ locals { goog-wl-os = local.os_full_name }) : {} + + primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url + worker_startup_url = var.sap_deployment_debug ? replace(var.worker_startup_url, "bash -s", "bash -x -s") : var.worker_startup_url + secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url + mm_startup_url = var.sap_deployment_debug ? replace(var.majority_maker_startup_url, "bash -s", "bash -x -s") : var.majority_maker_startup_url + + # HA Scaleout features + mm_partially_defined = (var.majority_maker_instance_name != "") || (var.majority_maker_machine_type != "") || (var.majority_maker_zone != "") + mm_fully_defined = (var.majority_maker_instance_name != "") && (var.majority_maker_machine_type != "") && (var.majority_maker_zone != "") + mm_zone_split = split("-", var.majority_maker_zone) + mm_region = length(local.mm_zone_split) < 3 ? "" : join("-", [local.mm_zone_split[0], local.mm_zone_split[1]]) +} + +# tflint-ignore: terraform_unused_declarations +data "assert_test" "scaleout_needs_mm" { + test = (local.mm_partially_defined && var.sap_hana_scaleout_nodes > 0) || (!local.mm_partially_defined && var.sap_hana_scaleout_nodes == 0) + throw = "sap_hana_scaleout_nodes and all majority_maker variables must be specified together: majority_maker_instance_name, majority_maker_machine_type, majority_maker_zone" +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "fully_specify_mm" { + test = !local.mm_partially_defined || local.mm_fully_defined + throw = "majority_maker_instance_name, majority_maker_machine_type, and majority_maker_zone must all be specified together" +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "mm_region_check" { + test = !local.mm_fully_defined || local.mm_region == local.region + throw = "Majority maker must be in the same region as the primary and secondary instances" +} +# tflint-ignore: terraform_unused_declarations +resource "validation_warning" "mm_zone_warning" { + condition = (var.majority_maker_zone == var.primary_zone) || (var.majority_maker_zone == var.secondary_zone) + summary = "It is recommended that the Majority Maker exist in a separate zone but in the same region as the primary and secondary instances." +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "no_rhel_with_scaleout" { + test = var.sap_hana_scaleout_nodes == 0 || !can(regex("rhel", var.linux_image_project)) + throw = "HANA HA Scaleout deployment is currently only supported on SLES operating systems." +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "hyperdisk_with_native_bm" { + test = local.native_bm ? 
length(regexall("hyperdisk", local.final_disk_type)) > 0 : true + throw = "Native bare metal machines only work with hyperdisks. Set 'disk_type' accordingly, e.g. 'disk_type = hyperdisk-balanced'" +} +# tflint-ignore: terraform_unused_declarations +data "assert_test" "backup_hyperdisk_with_native_bm" { + test = local.native_bm && var.include_backup_disk ? (length(regexall("hyperdisk", local.final_backup_disk_type)) > 0) : true + throw = "Native bare metal machines only work with hyperdisks. Set 'backup_disk_type' accordingly, e.g. 'backup_disk_type = hyperdisk-balanced'" } ################################################################################ @@ -153,8 +335,21 @@ resource "google_compute_address" "sap_hana_ha_vm_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = count.index == 0 ? var.primary_static_ip : var.secondary_static_ip } +resource "google_compute_address" "sap_hana_ha_worker_vm_ip" { + count = var.sap_hana_scaleout_nodes * 2 + name = (count.index % 2) == 0 ? "${var.primary_instance_name}w${floor(count.index / 2) + 1}-vm-ip" : "${var.secondary_instance_name}w${floor(count.index / 2) + 1}-vm-ip" + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + region = local.region + project = var.project_id + # The worker node IPs are all in one list, alternating between primary and secondary + address = (count.index % 2) == 0 ? ( + length(var.primary_worker_static_ips) > floor(count.index / 2) ? var.primary_worker_static_ips[floor(count.index / 2)] : "") : ( + length(var.secondary_worker_static_ips) > floor(count.index / 2) ? var.secondary_worker_static_ips[floor(count.index / 2)] : "") +} ################################################################################ # Primary Instance @@ -162,9 +357,10 @@ resource "google_compute_address" "sap_hana_ha_vm_ip" { ################################################################################ # disks ################################################################################ -resource "google_compute_disk" "sap_hana_ha_primary_boot_disk" { - name = "${var.primary_instance_name}-boot" - type = "pd-standard" +resource "google_compute_disk" "sap_hana_ha_primary_boot_disks" { + count = var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-boot" : "${var.primary_instance_name}w${count.index}-boot" + type = local.native_bm ? "hyperdisk-balanced" : "pd-balanced" zone = var.primary_zone size = local.default_boot_size project = var.project_id @@ -177,24 +373,74 @@ resource "google_compute_disk" "sap_hana_ha_primary_boot_disk" { ignore_changes = [image] } } -resource "google_compute_disk" "sap_hana_ha_primary_pdssd_disk" { - name = "${var.primary_instance_name}-pdssd" - type = "pd-ssd" - zone = var.primary_zone - size = local.pdssd_size - project = var.project_id +resource "google_compute_disk" "sap_hana_ha_primary_unified_disks" { + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes + 1 : 0 + name = count.index == 0 ? "${var.primary_instance_name}-hana" : "${var.primary_instance_name}w${count.index}-hana" + type = local.final_disk_type + zone = var.primary_zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops + provisioned_throughput = local.final_unified_throughput +} + +# Split data/log/sap disks +resource "google_compute_disk" "sap_hana_ha_primary_data_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? 
"${var.primary_instance_name}-data" : "${var.primary_instance_name}w${count.index}-data" + type = local.final_data_disk_type + zone = var.primary_zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops + provisioned_throughput = local.final_data_throughput +} + +resource "google_compute_disk" "sap_hana_ha_primary_log_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-log" : "${var.primary_instance_name}w${count.index}-log" + type = local.final_log_disk_type + zone = var.primary_zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops + provisioned_throughput = local.final_log_throughput +} +resource "google_compute_disk" "sap_hana_ha_primary_shared_disk" { + count = var.use_single_shared_data_log_disk ? 0 : 1 + name = "${var.primary_instance_name}-shared" + type = local.final_shared_disk_type + zone = var.primary_zone + size = local.shared_pd_size + project = var.project_id + provisioned_iops = local.final_shared_iops + provisioned_throughput = local.final_shared_throughput +} +resource "google_compute_disk" "sap_hana_ha_primary_usrsap_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.primary_instance_name}-usrsap" : "${var.primary_instance_name}w${count.index}-usrsap" + type = local.final_usrsap_disk_type + zone = var.primary_zone + size = local.usrsap_pd_size + project = var.project_id + provisioned_iops = local.final_usrsap_iops + provisioned_throughput = local.final_usrsap_throughput } resource "google_compute_disk" "sap_hana_ha_primary_backup_disk" { - name = "${var.primary_instance_name}-backup" - type = "pd-standard" - zone = var.primary_zone - size = local.pdhdd_size - project = var.project_id + count = var.include_backup_disk ? 1 : 0 + name = "${var.primary_instance_name}-backup" + type = local.final_backup_disk_type + zone = var.primary_zone + size = local.backup_pd_size + project = var.project_id + provisioned_iops = local.final_backup_iops + provisioned_throughput = local.final_backup_throughput } ################################################################################ # instance ################################################################################ + resource "google_compute_instance" "sap_hana_ha_primary_instance" { name = var.primary_instance_name machine_type = var.machine_type @@ -206,25 +452,66 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { boot_disk { auto_delete = true device_name = "boot" - source = google_compute_disk.sap_hana_ha_primary_boot_disk.self_link + source = google_compute_disk.sap_hana_ha_primary_boot_disks[0].self_link } - attached_disk { - device_name = google_compute_disk.sap_hana_ha_primary_pdssd_disk.name - source = google_compute_disk.sap_hana_ha_primary_pdssd_disk.self_link + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } } - attached_disk { - device_name = google_compute_disk.sap_hana_ha_primary_backup_disk.name - source = google_compute_disk.sap_hana_ha_primary_backup_disk.self_link + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? 
[1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_primary_unified_disks[0].name + source = google_compute_disk.sap_hana_ha_primary_unified_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_data_disks[0].name + source = google_compute_disk.sap_hana_ha_primary_data_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_log_disks[0].name + source = google_compute_disk.sap_hana_ha_primary_log_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_shared_disk[0].name + source = google_compute_disk.sap_hana_ha_primary_shared_disk[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_usrsap_disks[0].name + source = google_compute_disk.sap_hana_ha_primary_usrsap_disks[0].self_link + } } + dynamic "attached_disk" { + for_each = var.include_backup_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_primary_backup_disk[0].name + source = google_compute_disk.sap_hana_ha_primary_backup_disk[0].self_link + } + } can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_ha_vm_ip[0].address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -259,7 +546,7 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { metadata = merge( { - startup-script = var.primary_startup_url + startup-script = local.primary_startup_url post_deployment_script = var.post_deployment_script sap_deployment_debug = var.sap_deployment_debug sap_hana_deployment_bucket = var.sap_hana_deployment_bucket @@ -279,6 +566,14 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { sap_secondary_instance = var.secondary_instance_name sap_primary_zone = var.primary_zone sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" + sap_hana_data_disk_type = local.final_data_disk_type + enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" }, local.wlm_metadata @@ -290,15 +585,147 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" { } } +resource "google_compute_instance" "sap_hana_ha_primary_workers" { + count = var.sap_hana_scaleout_nodes + name = "${var.primary_instance_name}w${count.index + 1}" + machine_type = var.machine_type + zone = var.primary_zone + project = var.project_id + + min_cpu_platform = lookup(local.cpu_platform_map, var.machine_type, "Automatic") + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.sap_hana_ha_primary_boot_disks[count.index + 1].self_link + } + + dynamic "scheduling" { + for_each = local.native_bm ? 
[1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_primary_unified_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_primary_unified_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_data_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_primary_data_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_log_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_primary_log_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_primary_usrsap_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_primary_usrsap_disks[count.index + 1].self_link + } + } + can_ip_forward = var.can_ip_forward + + network_interface { + subnetwork = local.subnetwork_uri + # The worker node IPs are all in one list, alternating between primary and secondary + network_ip = google_compute_address.sap_hana_ha_worker_vm_ip[count.index * 2].address + + nic_type = var.nic_type == "" ? null : var.nic_type + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + + tags = local.network_tags + + service_account { + # The default empty service account string will use the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + + dynamic "reservation_affinity" { + for_each = length(var.primary_reservation_name) > 1 ? 
[1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.primary_reservation_name] + } + } + } + + labels = local.wlm_labels + + metadata = merge( + { + startup-script = local.worker_startup_url + post_deployment_script = var.post_deployment_script + sap_deployment_debug = var.sap_deployment_debug + sap_hana_deployment_bucket = var.sap_hana_deployment_bucket + sap_hana_sid = var.sap_hana_sid + sap_hana_instance_number = var.sap_hana_instance_number + sap_hana_sidadm_password = var.sap_hana_sidadm_password + sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret + # wording on system_password may be inconsistent with DM + sap_hana_system_password = var.sap_hana_system_password + sap_hana_system_password_secret = var.sap_hana_system_password_secret + sap_hana_sidadm_uid = var.sap_hana_sidadm_uid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_vip = var.sap_vip + sap_vip_solution = local.sap_vip_solution + sap_hc_port = local.sap_hc_port + sap_primary_instance = var.primary_instance_name + sap_secondary_instance = var.secondary_instance_name + sap_primary_zone = var.primary_zone + sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" + sap_hana_data_disk_type = local.final_data_disk_type + enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm + template-type = "TERRAFORM" + }, + local.wlm_metadata + ) + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} + + ################################################################################ # Secondary Instance ################################################################################ ################################################################################ # disks ################################################################################ -resource "google_compute_disk" "sap_hana_ha_secondary_boot_disk" { - name = "${var.secondary_instance_name}-boot" - type = "pd-standard" +resource "google_compute_disk" "sap_hana_ha_secondary_boot_disks" { + count = var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-boot" : "${var.secondary_instance_name}w${count.index}-boot" + type = local.native_bm ? "hyperdisk-balanced" : "pd-balanced" zone = var.secondary_zone size = local.default_boot_size project = var.project_id @@ -311,19 +738,68 @@ resource "google_compute_disk" "sap_hana_ha_secondary_boot_disk" { ignore_changes = [image] } } -resource "google_compute_disk" "sap_hana_ha_secondary_pdssd_disk" { - name = "${var.secondary_instance_name}-pdssd" - type = "pd-ssd" - zone = var.secondary_zone - size = local.pdssd_size - project = var.project_id +resource "google_compute_disk" "sap_hana_ha_secondary_unified_disks" { + count = var.use_single_shared_data_log_disk ? var.sap_hana_scaleout_nodes + 1 : 0 + name = count.index == 0 ?
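+  # Naming convention sketch: index 0 is the head node ("<secondary_instance_name>-hana");
+  # scale-out workers get a "w<index>" suffix, mirroring the primary-side disks.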
"${var.secondary_instance_name}-hana" : "${var.secondary_instance_name}w${count.index}-hana" + type = local.final_disk_type + zone = var.secondary_zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops + provisioned_throughput = local.final_unified_throughput +} + +# Split data/log/sap disks +resource "google_compute_disk" "sap_hana_ha_secondary_data_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-data" : "${var.secondary_instance_name}w${count.index}-data" + type = local.final_data_disk_type + zone = var.secondary_zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops + provisioned_throughput = local.final_data_throughput +} +resource "google_compute_disk" "sap_hana_ha_secondary_log_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-log" : "${var.secondary_instance_name}w${count.index}-log" + type = local.final_log_disk_type + zone = var.secondary_zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops + provisioned_throughput = local.final_log_throughput +} +resource "google_compute_disk" "sap_hana_ha_secondary_shared_disk" { + count = var.use_single_shared_data_log_disk ? 0 : 1 + name = "${var.secondary_instance_name}-shared" + type = local.final_shared_disk_type + zone = var.secondary_zone + size = local.shared_pd_size + project = var.project_id + provisioned_iops = local.final_shared_iops + provisioned_throughput = local.final_shared_throughput +} +resource "google_compute_disk" "sap_hana_ha_secondary_usrsap_disks" { + count = var.use_single_shared_data_log_disk ? 0 : var.sap_hana_scaleout_nodes + 1 + name = count.index == 0 ? "${var.secondary_instance_name}-usrsap" : "${var.secondary_instance_name}w${count.index}-usrsap" + type = local.final_usrsap_disk_type + zone = var.secondary_zone + size = local.usrsap_pd_size + project = var.project_id + provisioned_iops = local.final_usrsap_iops + provisioned_throughput = local.final_usrsap_throughput } + resource "google_compute_disk" "sap_hana_ha_secondary_backup_disk" { - name = "${var.secondary_instance_name}-backup" - type = "pd-standard" - zone = var.secondary_zone - size = local.pdhdd_size - project = var.project_id + count = var.include_backup_disk ? 1 : 0 + name = "${var.secondary_instance_name}-backup" + type = local.final_backup_disk_type + zone = var.secondary_zone + size = local.backup_pd_size + project = var.project_id + provisioned_iops = local.final_backup_iops + provisioned_throughput = local.final_backup_throughput } ################################################################################ @@ -340,17 +816,58 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { boot_disk { auto_delete = true device_name = "boot" - source = google_compute_disk.sap_hana_ha_secondary_boot_disk.self_link + source = google_compute_disk.sap_hana_ha_secondary_boot_disks[0].self_link } - attached_disk { - device_name = google_compute_disk.sap_hana_ha_secondary_pdssd_disk.name - source = google_compute_disk.sap_hana_ha_secondary_pdssd_disk.self_link + dynamic "scheduling" { + for_each = local.native_bm ? 
[1] : [] + content { + on_host_maintenance = "TERMINATE" + } } - attached_disk { - device_name = google_compute_disk.sap_hana_ha_secondary_backup_disk.name - source = google_compute_disk.sap_hana_ha_secondary_backup_disk.self_link + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_unified_disks[0].name + source = google_compute_disk.sap_hana_ha_secondary_unified_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_data_disks[0].name + source = google_compute_disk.sap_hana_ha_secondary_data_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_log_disks[0].name + source = google_compute_disk.sap_hana_ha_secondary_log_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_shared_disk[0].name + source = google_compute_disk.sap_hana_ha_secondary_shared_disk[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[0].name + source = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[0].self_link + } + } + + dynamic "attached_disk" { + for_each = var.include_backup_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_backup_disk[0].name + source = google_compute_disk.sap_hana_ha_secondary_backup_disk[0].self_link + } } can_ip_forward = var.can_ip_forward @@ -358,6 +875,7 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_ha_vm_ip[1].address + nic_type = var.nic_type == "" ? null : var.nic_type # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -392,7 +910,7 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { metadata = merge( { - startup-script = var.secondary_startup_url + startup-script = local.secondary_startup_url post_deployment_script = var.post_deployment_script sap_deployment_debug = var.sap_deployment_debug sap_hana_deployment_bucket = var.sap_hana_deployment_bucket @@ -412,6 +930,13 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { sap_secondary_instance = var.secondary_instance_name sap_primary_zone = var.primary_zone sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? 
var.majority_maker_instance_name : "" + enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm template-type = "TERRAFORM" }, local.wlm_metadata @@ -423,6 +948,135 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" { } } +resource "google_compute_instance" "sap_hana_ha_secondary_workers" { + count = var.sap_hana_scaleout_nodes + name = "${var.secondary_instance_name}w${count.index + 1}" + machine_type = var.machine_type + zone = var.secondary_zone + project = var.project_id + + min_cpu_platform = lookup(local.cpu_platform_map, var.machine_type, "Automatic") + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.sap_hana_ha_secondary_boot_disks[count.index + 1].self_link + } + + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } + } + + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_unified_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_secondary_unified_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_data_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_secondary_data_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_log_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_secondary_log_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_shared_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[count.index + 1].name + source = google_compute_disk.sap_hana_ha_secondary_usrsap_disks[count.index + 1].self_link + } + } + + can_ip_forward = var.can_ip_forward + + network_interface { + subnetwork = local.subnetwork_uri + # The worker node IPs are all in one list, alternating between primary and secondary + network_ip = google_compute_address.sap_hana_ha_worker_vm_ip[count.index * 2 + 1].address + nic_type = var.nic_type == "" ? null : var.nic_type + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + + tags = local.network_tags + + service_account { + # An empty string service account will default to the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + + dynamic "reservation_affinity" { + for_each = length(var.secondary_reservation_name) > 1 ? 
[1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.secondary_reservation_name] + } + } + } + + labels = local.wlm_labels + + metadata = merge( + { + startup-script = local.worker_startup_url + post_deployment_script = var.post_deployment_script + sap_deployment_debug = var.sap_deployment_debug + sap_hana_deployment_bucket = var.sap_hana_deployment_bucket + sap_hana_sid = var.sap_hana_sid + sap_hana_instance_number = var.sap_hana_instance_number + sap_hana_sidadm_password = var.sap_hana_sidadm_password + sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret + # wording on system_password may be inconsistent with DM + sap_hana_system_password = var.sap_hana_system_password + sap_hana_system_password_secret = var.sap_hana_system_password_secret + sap_hana_sidadm_uid = var.sap_hana_sidadm_uid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_vip = var.sap_vip + sap_vip_solution = local.sap_vip_solution + sap_hc_port = local.sap_hc_port + sap_primary_instance = var.primary_instance_name + sap_secondary_instance = var.secondary_instance_name + sap_primary_zone = var.primary_zone + sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" + enable_fast_restart = var.enable_fast_restart + native_bm = local.native_bm + template-type = "TERRAFORM" + }, + local.wlm_metadata + ) + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} ################################################################################ # Optional ILB for VIP @@ -449,11 +1103,13 @@ resource "google_compute_region_backend_service" "sap_hana_ha_loadbalancer" { health_checks = [google_compute_health_check.sap_hana_ha_loadbalancer_hc.self_link] backend { - group = google_compute_instance_group.sap_hana_ha_primary_instance_group.self_link + group = google_compute_instance_group.sap_hana_ha_primary_instance_group.self_link + failover = false } backend { - group = google_compute_instance_group.sap_hana_ha_secondary_instance_group.self_link + group = google_compute_instance_group.sap_hana_ha_secondary_instance_group.self_link + failover = true } protocol = "TCP" @@ -510,3 +1166,111 @@ resource "google_compute_firewall" "sap_hana_ha_vpc_firewall" { ports = [local.sap_hc_port] } } + +################################################################################ +# Majority Maker +################################################################################ + +resource "google_compute_disk" "sap_majority_maker_boot_disk" { + count = local.mm_fully_defined ? 1 : 0 + name = "${var.majority_maker_instance_name}-boot" + type = "pd-balanced" + zone = var.majority_maker_zone + size = local.default_boot_size + project = var.project_id + image = local.os_full_name + lifecycle { + # Ignores newer versions of the OS image. Removing this lifecycle + # and re-applying will cause the current disk to be deleted. + # All existing data will be lost. + ignore_changes = [image] + } +} + +resource "google_compute_address" "sap_hana_majority_maker_vm_ip" { + count = local.mm_fully_defined ?
1 : 0 + name = "${var.majority_maker_instance_name}-ip" + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + region = local.region + project = var.project_id +} + +resource "google_compute_instance" "sap_majority_maker_instance" { + count = local.mm_fully_defined ? 1 : 0 + name = var.majority_maker_instance_name + machine_type = var.majority_maker_machine_type + zone = var.majority_maker_zone + project = var.project_id + + min_cpu_platform = lookup(local.cpu_platform_map, var.majority_maker_machine_type, "Automatic") + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.sap_majority_maker_boot_disk[0].self_link + } + + can_ip_forward = var.can_ip_forward + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_hana_majority_maker_vm_ip[0].address + nic_type = var.nic_type == "" ? null : var.nic_type + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + tags = local.network_tags + service_account { + # The default empty service account string will use the project's default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + + metadata = merge( + { + startup-script = local.mm_startup_url + sap_deployment_debug = var.sap_deployment_debug + primary = var.primary_instance_name + secondary = var.secondary_instance_name + post_deployment_script = var.post_deployment_script + sap_hana_deployment_bucket = var.sap_hana_deployment_bucket + sap_hana_sid = var.sap_hana_sid + sap_hana_instance_number = var.sap_hana_instance_number + sap_hana_sidadm_password = var.sap_hana_sidadm_password + sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret + # wording on system_password may be inconsistent with DM + sap_hana_system_password = var.sap_hana_system_password + sap_hana_system_password_secret = var.sap_hana_system_password_secret + sap_hana_sidadm_uid = var.sap_hana_sidadm_uid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid + sap_vip = var.sap_vip + sap_vip_solution = local.sap_vip_solution + sap_hc_port = local.sap_hc_port + sap_primary_instance = var.primary_instance_name + sap_secondary_instance = var.secondary_instance_name + sap_primary_zone = var.primary_zone + sap_secondary_zone = var.secondary_zone + use_single_shared_data_log_disk = var.use_single_shared_data_log_disk + sap_hana_backup_disk = var.include_backup_disk + sap_hana_shared_disk = !var.use_single_shared_data_log_disk + sap_hana_scaleout_nodes = var.sap_hana_scaleout_nodes + majority_maker_instance_name = local.mm_fully_defined ? var.majority_maker_instance_name : "" + template-type = "TERRAFORM" + }, + local.wlm_metadata + ) + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} + + + diff --git a/modules/sap_hana_ha/outputs.tf b/modules/sap_hana_ha/outputs.tf index ad6e0a0f..e2023a3d 100644 --- a/modules/sap_hana_ha/outputs.tf +++ b/modules/sap_hana_ha/outputs.tf @@ -13,15 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - output "sap_hana_ha_primary_instance_self_link" { description = "Self-link for the primary SAP HANA HA instance created."
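+# Usage sketch (caller-side; the local module name is assumed): a root module
+# can forward this value, e.g.
+#   output "hana_primary_self_link" {
+#     value = module.sap_hana_ha.sap_hana_ha_primary_instance_self_link
+#   }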
value = google_compute_instance.sap_hana_ha_primary_instance.self_link } +output "sap_hana_ha_primary_worker_self_links" { + description = "Self-links for the worker nodes in the primary SAP HANA HA instance." + value = google_compute_instance.sap_hana_ha_primary_workers[*].self_link +} output "sap_hana_ha_secondary_instance_self_link" { description = "Self-link for the secondary SAP HANA HA instance created." value = google_compute_instance.sap_hana_ha_secondary_instance.self_link } +output "sap_hana_ha_secondary_worker_self_links" { + description = "Self-links for the worker nodes in the secondary SAP HANA HA instance." + value = google_compute_instance.sap_hana_ha_secondary_workers[*].self_link +} output "sap_hana_ha_loadbalander_link" { description = "Link to the optional load balancer" value = google_compute_region_backend_service.sap_hana_ha_loadbalancer[*].self_link diff --git a/modules/sap_hana_ha/variables.tf b/modules/sap_hana_ha/variables.tf index d29c168c..fe96bdbb 100644 --- a/modules/sap_hana_ha/variables.tf +++ b/modules/sap_hana_ha/variables.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - variable "project_id" { type = string description = "Project id where the instances will be created." @@ -69,8 +68,12 @@ variable "secondary_instance_name" { variable "sap_hana_deployment_bucket" { type = string - description = "The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." - default = "" + description = "The Cloud Storage path that contains the SAP HANA media. Do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed." + validation { + condition = (!(length(regexall("gs:", var.sap_hana_deployment_bucket)) > 0)) + error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://." + } + default = "" } variable "sap_hana_sid" { @@ -253,9 +256,262 @@ variable "post_deployment_script" { default = "" } +variable "nic_type" { + type = string + description = "Optional - This value determines the type of NIC to use; valid options are GVNIC and VIRTIO_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking." + validation { + condition = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type) + error_message = "The nic_type must be either GVNIC or VIRTIO_NET." + } + default = "" +} + +variable "disk_type" { + type = string + description = "Optional - The default disk type to use for disk(s) containing log and data volumes. The default is hyperdisk-extreme for native bare metal machines and pd-ssd otherwise. Not all disk types are supported on all machine types - see https://cloud.google.com/compute/docs/disks/ for details." + validation { + condition = contains(["", "pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type) + error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} + +variable "use_single_shared_data_log_disk" { + type = bool + description = "Optional - By default, separate disks for data, logs, and shared volumes are created. If set to true, a single disk is used instead."
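+  # Illustrative effect: true provisions one "<name>-hana" disk per node;
+  # false provisions "<name>-data", "<name>-log", and "<name>-usrsap" disks
+  # per node plus a single "<name>-shared" disk, as defined in main.tf.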
+ default = false +} + +variable "include_backup_disk" { + type = bool + description = "Optional - The default is true. If true, a disk for backups is created." + default = true +} + +variable "sap_hana_scaleout_nodes" { + type = number + description = "Optional - Specify to add scaleout nodes to both HA instances." + default = 0 +} + +variable "majority_maker_instance_name" { + type = string + description = "Optional - Name to use for the Majority Maker instance. Must be provided if sap_hana_scaleout_nodes > 0." + default = "" +} + +variable "majority_maker_machine_type" { + type = string + description = "Optional - The machine type to use for the Majority Maker instance. Must be provided if sap_hana_scaleout_nodes > 0." + default = "" +} + +variable "majority_maker_zone" { + type = string + description = "Optional - The zone in which the Majority Maker instance will be deployed. Must be provided if sap_hana_scaleout_nodes > 0. It is recommended that this be different from the zones in which the primary and secondary instances are deployed." + default = "" +} + +variable "primary_static_ip" { + type = string + description = "Optional - Defines an internal static IP for the primary VM." + validation { + condition = var.primary_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.primary_static_ip)) + error_message = "The primary_static_ip must be a valid IP address." + } + default = "" +} + +variable "secondary_static_ip" { + type = string + description = "Optional - Defines an internal static IP for the secondary VM." + validation { + condition = var.secondary_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.secondary_static_ip)) + error_message = "The secondary_static_ip must be a valid IP address." + } + default = "" +} + +variable "primary_worker_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the primary worker nodes." + validation { + condition = alltrue([ + for ip in var.primary_worker_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All primary_worker_static_ips must be valid IP addresses." + } + default = [] +} + +variable "secondary_worker_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the secondary worker nodes." + validation { + condition = alltrue([ + for ip in var.secondary_worker_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All secondary_worker_static_ips must be valid IP addresses." + } + default = [] +} + + +variable "backup_disk_type" { + type = string + description = "Optional - The default is hyperdisk-balanced for native bare metal machines and pd-balanced otherwise. Only used if a backup disk is needed." + validation { + condition = contains(["", "pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.backup_disk_type) + error_message = "The backup_disk_type must be either pd-ssd, pd-balanced, pd-standard, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} + +variable "hyperdisk_balanced_iops_default" { + type = number + description = "Optional - default is 3000.
Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." + default = 3000 +} + +variable "hyperdisk_balanced_throughput_default" { + type = number + description = "Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot/usrsap/shared disks)." + default = 750 +} + +variable "enable_fast_restart" { + type = bool + description = "Optional - The default is true. If set enables HANA Fast Restart." + default = true +} + # -# DO NOT MODIFY unless you know what you are doing +# DO NOT MODIFY unless instructed or aware of the implications of using those settings # + +variable "data_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) + error_message = "The data_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "log_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) + error_message = "The log_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "shared_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the shared disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.shared_disk_type_override) + error_message = "The shared_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "usrsap_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the /usr/sap disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.usrsap_disk_type_override) + error_message = "The usrsap_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "unified_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary disk(s), that is based off of the machine_type." + default = null +} +variable "data_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." + default = null +} +variable "log_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. 
Overrides the default size for the log disk(s), that is based off of the machine_type." + default = null +} +variable "shared_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the shared disk, that is based off of the machine_type." + default = null +} +variable "usrsap_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the /usr/sap disk(s), that is based off of the machine_type." + default = null +} +variable "unified_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "shared_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the shared disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "usrsap_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "backup_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the backup disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} + +variable "unified_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." 
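+  # Hedged tfvars sketch (value illustrative only, not a sizing recommendation):
+  #   log_disk_throughput_override = 800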
+ default = null +} +variable "shared_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the shared disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "usrsap_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the /usr/sap disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "backup_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the backup disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} + variable "wlm_deployment_name" { type = string description = "Deployment name to be used for integrating into Work Load Management." @@ -271,17 +527,29 @@ variable "is_work_load_management_deployment" { variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" +} + +variable "worker_startup_url" { + type = string + description = "Startup script to be executed when the worker VM boots, should not be overridden." + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_worker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202210141928/terraform" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } +variable "majority_maker_startup_url" { + type = string + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_ha/startup_majority_maker.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" + description = "DO NOT USE" +} variable "can_ip_forward" { type = bool description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed." default = true } + diff --git a/modules/sap_hana_ha/versions.tf b/modules/sap_hana_ha/versions.tf index 4f9034cf..85173682 100644 --- a/modules/sap_hana_ha/versions.tf +++ b/modules/sap_hana_ha/versions.tf @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - terraform { required_version = ">=0.12.6" required_providers { @@ -21,6 +20,14 @@ terraform { source = "hashicorp/google" version = ">= 4.0.0, < 6" } + assert = { + source = "bwoznicki/assert" + version = "0.0.1" + } + validation = { + source = "tlkamp/validation" + version = "1.0.0" + } } provider_meta "google" { diff --git a/modules/sap_hana_scaleout/README.md b/modules/sap_hana_scaleout/README.md index 59be3110..63852a8f 100644 --- a/modules/sap_hana_scaleout/README.md +++ b/modules/sap_hana_scaleout/README.md @@ -1,32 +1,40 @@ -# terraform-google-sap for SAP HANA Scaleout +# Terraform for SAP HANA Scaleout for Google Cloud This template follows the documented steps -https://cloud.google.com/solutions/sap/docs/sap-hana-ha-scaleout-tf-deployment-guide and deploys an SAP HANA scale-out system that includes the SAP HANA host auto-failover fault-recovery solution. +https://cloud.google.com/solutions/sap/docs/certifications-sap-hana and deploys +the Google Cloud resources for an SAP HANA scale-out system. -## Usage +## Set up Terraform -Basic usage of this module is as follows: +Install Terraform on the machine you would like to use to deploy from by +following +https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform -```hcl -module "hana_scaleout" { - source = "terraform-google-modules/sap/google//modules/sap_hana_scaleout" - version = "~> 1.1" +## How to deploy - project_id = "PROJECT_ID" # example: my-project-x - zone = "ZONE" # example: us-east1-b - machine_type = "MACHINE_TYPE" # example: n1-highmem-32 - subnetwork = "SUBNETWORK" # example: default - linux_image = "LINUX_IMAGE" # example: rhel-8-4-sap-ha - linux_image_project = "LINUX_IMAGE_PROJECT" # example: rhel-sap-cloud - instance_name = "VM_NAME" # example: hana-instance - sap_hana_sid = "SID" # example: ABC, Must conform to [a-zA-Z][a-zA-Z0-9]{2} - sap_hana_shared_nfs = "HANA_SHARED_NFS" # example: 10.10.10.10:/shared - sap_hana_backup_nfs = "HANA_BACKUP_NFS" # example: 10.10.10.10:/backup -} -``` +1. Download the .tf file into an empty directory: `curl + https://storage.googleapis.com/cloudsapdeploy/terraform/latest/terraform/sap_hana_scaleout/terraform/sap_hana_scaleout.tf + -o sap_hana_scaleout.tf` -Functional example is included in the -[examples](../../examples/sap_hana_scaleout) directory. +2. Fill in the mandatory variables and, if desired, any optional variables in + the .tf file. + +3. Deploy + + 1. Run `terraform init` (only needed once) + 2. Run `terraform plan` to see what is going to be deployed. Verify that + names, zones, sizes, etc. are as desired. + 3. Run `terraform apply` to deploy the resources + 4. Run `terraform destroy` to remove the resources + +4. Continue installation of SAP software and setup of remaining cluster + resources as per documentation at + https://cloud.google.com/solutions/sap/docs/sap-hana-ha-scaleout-tf-deployment-guide + +## Additional information + +For additional information see https://www.terraform.io/docs/index.html and +https://cloud.google.com/docs/terraform ## Inputs @@ -34,20 +42,33 @@ Functional example is included in the | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no | +| data\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting.
Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| data\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| data\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the data disk. | `string` | `""` | no | +| disk\_type | Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details. | `string` | `"pd-ssd"` | no | +| hyperdisk\_balanced\_iops\_default | Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot disk). | `number` | `3000` | no | +| hyperdisk\_balanced\_throughput\_default | Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot disk). | `number` | `750` | no | | instance\_name | Hostname of the GCE instance. | `string` | n/a | yes | | linux\_image | Linux image name to use. | `string` | n/a | yes | | linux\_image\_project | The project which the Linux image belongs to. | `string` | n/a | yes | +| log\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine\_type. | `number` | `null` | no | +| log\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| log\_disk\_type\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default\_disk\_type' for the log disk. | `string` | `""` | no | | machine\_type | Machine type for the instances. | `string` | n/a | yes | | network\_tags | OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes. | `list(string)` | `[]` | no | +| nic\_type | Optional - This value determines the type of NIC to use, valid options are GVNIC and VIRTIO\_NET. If choosing GVNIC make sure that it is supported by your OS choice here https://cloud.google.com/compute/docs/images/os-details#networking. | `string` | `""` | no | | post\_deployment\_script | OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment. | `string` | `""` | no | -| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. 
| `string` | `"curl -s BUILD.TERRA_SH_URL/sap_hana_scaleout/startup.sh | bash -s BUILD.TERRA_SH_URL"` | no | +| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_scaleout/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | | project\_id | Project id where the instances will be created. | `string` | n/a | yes | | public\_ip | OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail. | `bool` | `true` | no | | reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform" | `string` | `""` | no | | sap\_deployment\_debug | OPTIONAL - If this value is set to true, the deployment will generates verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging. | `bool` | `false` | no | | sap\_hana\_backup\_nfs | Google Filestore share for /hanabackup | `string` | n/a | yes | -| sap\_hana\_deployment\_bucket | The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | +| sap\_hana\_deployment\_bucket | The Cloud Storage path that contains the SAP HANA media, do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `string` | `""` | no | | sap\_hana\_instance\_number | The SAP instance number. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. | `number` | `0` | no | +| sap\_hana\_sapsys\_gid | The Linux GID of the SAPSYS group. By default this is set to 79 | `number` | `79` | no | | sap\_hana\_shared\_nfs | Google Filestore share for /hana/shared | `string` | n/a | yes | | sap\_hana\_sid | The SAP HANA SID. SID must adhere to SAP standard (Three letters or numbers and start with a letter) | `string` | n/a | yes | | sap\_hana\_sidadm\_password | The linux sidadm login password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters. | `string` | `""` | no | @@ -57,9 +78,16 @@ Functional example is included in the | sap\_hana\_system\_password | The SAP HANA SYSTEM password. If this is not defined, the GCE instance will be provisioned without SAP HANA installed. Minimum requirement is 8 characters with at least 1 number. | `string` | `""` | no | | sap\_hana\_system\_password\_secret | The secret key used to retrieve the SAP HANA SYSTEM login from Secret Manager (https://cloud.google.com/secret-manager). The Secret Manager password will overwrite the clear text password from sap\_hana\_system\_password if both are set. | `string` | `""` | no | | sap\_hana\_worker\_nodes | Number of worker nodes to create.
This is in addition to the primary node. | `number` | `1` | no | -| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s BUILD.TERRA_SH_URL/sap_hana_scaleout/startup_secondary.sh | bash -s BUILD.TERRA_SH_URL"` | no | +| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_scaleout/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no | | service\_account | OPTIONAL - Ability to define a custom service account instead of using the default project service account. | `string` | `""` | no | +| standby\_static\_ips | Optional - Defines internal static IP addresses for the standby nodes. | `list(string)` | `[]` | no | | subnetwork | The sub network to deploy the instance in. | `string` | n/a | yes | +| unified\_disk\_iops\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| unified\_disk\_size\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary's unified disk, that is based off of the machine\_type. | `number` | `null` | no | +| unified\_disk\_throughput\_override | Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it. | `number` | `null` | no | +| use\_single\_data\_log\_disk | Optional - By default two separate disk for data and logs will be made. If set to true, one disk will be used instead. | `bool` | `false` | no | +| vm\_static\_ip | Optional - Defines an internal static IP for the VM. | `string` | `""` | no | +| worker\_static\_ips | Optional - Defines internal static IP addresses for the worker nodes. | `list(string)` | `[]` | no | | zone | Zone where the instances will be created. | `string` | n/a | yes | ## Outputs diff --git a/modules/sap_hana_scaleout/main.tf b/modules/sap_hana_scaleout/main.tf index 586a646c..11659512 100644 --- a/modules/sap_hana_scaleout/main.tf +++ b/modules/sap_hana_scaleout/main.tf @@ -13,13 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - # # Terraform SAP HANA Scaleout for Google Cloud # # -# Version: BUILD.VERSION -# Build Hash: BUILD.HASH +# Version: 2.0.202404101403 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc # ################################################################################ @@ -27,68 +26,174 @@ ################################################################################ locals { mem_size_map = { - "n1-highmem-32" = 208 - "n1-highmem-64" = 416 - "n1-highmem-96" = 624 - "n1-megamem-96" = 1433 - "n2-highmem-32" = 256 - "n2-highmem-48" = 386 - "n2-highmem-64" = 512 - "n2-highmem-80" = 640 - "n2-highmem-96" = 768 - "n2-highmem-128" = 864 - "n1-ultramem-40" = 961 - "n1-ultramem-80" = 1922 - "n1-ultramem-160" = 3844 - "m1-megamem-96" = 1433 - "m1-ultramem-40" = 961 - "m1-ultramem-80" = 1922 - "m1-ultramem-160" = 3844 - "m2-ultramem-208" = 5916 - "m2-megamem-416" = 5916 - "m2-hypermem-416" = 8832 - "m2-ultramem-416" = 11832 - "m3-megamem-64" = 976 - "m3-megamem-128" = 1952 - "m3-ultramem-32" = 976 - "m3-ultramem-64" = 1952 - "m3-ultramem-128" = 3904 + "n1-highmem-32" = 208 + "n1-highmem-64" = 416 + "n1-highmem-96" = 624 + "n1-megamem-96" = 1433 + "n2-highmem-32" = 256 + "n2-highmem-48" = 384 + "n2-highmem-64" = 512 + "n2-highmem-80" = 640 + "n2-highmem-96" = 768 + "n2-highmem-128" = 864 + "n1-ultramem-40" = 961 + "n1-ultramem-80" = 1922 + "n1-ultramem-160" = 3844 + "m1-megamem-96" = 1433 + "m1-ultramem-40" = 961 + "m1-ultramem-80" = 1922 + "m1-ultramem-160" = 3844 + "m2-ultramem-208" = 5888 + "m2-megamem-416" = 5888 + "m2-hypermem-416" = 8832 + "m2-ultramem-416" = 11744 + "m3-megamem-64" = 976 + "m3-megamem-128" = 1952 + "m3-ultramem-32" = 976 + "m3-ultramem-64" = 1952 + "m3-ultramem-128" = 3904 + "c3-standard-44" = 176 + "c3-highmem-44" = 352 + "c3-highmem-88" = 704 + "c3-highmem-176" = 1408 + "c3-standard-192-metal" = 768 + "c3-highcpu-192-metal" = 512 + "c3-highmem-192-metal" = 1536 + "x4-megamem-960-metal" = 16384 + "x4-megamem-1440-metal" = 24576 + "x4-megamem-1920-metal" = 32768 } cpu_platform_map = { - "n1-standard-16" = "Intel Haswell" - "n1-highmem-32" = "Intel Broadwell" - "n1-highmem-64" = "Intel Broadwell" - "n1-highmem-96" = "Intel Skylake" - "n1-megamem-96" = "Intel Skylake" - "n2-highmem-32" = "Automatic" - "n2-highmem-48" = "Automatic" - "n2-highmem-64" = "Automatic" - "n2-highmem-80" = "Automatic" - "n2-highmem-96" = "Automatic" - "n2-highmem-128" = "Automatic" - "n1-ultramem-40" = "Automatic" - "n1-ultramem-80" = "Automatic" - "n1-ultramem-160" = "Automatic" - "m1-megamem-96" = "Intel Skylake" - "m1-ultramem-40" = "Automatic" - "m1-ultramem-80" = "Automatic" - "m1-ultramem-160" = "Automatic" - "m2-ultramem-208" = "Automatic" - "m2-megamem-416" = "Automatic" - "m2-hypermem-416" = "Automatic" - "m2-ultramem-416" = "Automatic" - "m3-megamem-64" = "Automatic" - "m3-megamem-128" = "Automatic" - "m3-ultramem-32" = "Automatic" - "m3-ultramem-64" = "Automatic" - "m3-ultramem-128" = "Automatic" - } - mem_size = lookup(local.mem_size_map, var.machine_type, 320) - sap_hana_log_size_min = min(512, max(64, local.mem_size / 2)) - sap_hana_data_size_min = local.mem_size * 12 / 10 - - sap_hana_log_size = local.sap_hana_log_size_min - sap_hana_data_size = local.sap_hana_data_size_min + "n1-highmem-32" = "Intel Broadwell" + "n1-highmem-64" = "Intel Broadwell" + "n1-highmem-96" = "Intel Skylake" + "n1-megamem-96" = "Intel Skylake" + "n2-highmem-32" = "Automatic" + "n2-highmem-48" = "Automatic" + "n2-highmem-64" = "Automatic" + "n2-highmem-80" = "Automatic" + "n2-highmem-96" = "Automatic" + 
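+    # Machine types absent from this map fall back to "Automatic" via
+    # lookup(local.cpu_platform_map, var.machine_type, "Automatic").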
"n2-highmem-128" = "Automatic" + "n1-ultramem-40" = "Automatic" + "n1-ultramem-80" = "Automatic" + "n1-ultramem-160" = "Automatic" + "m1-megamem-96" = "Intel Skylake" + "m1-ultramem-40" = "Automatic" + "m1-ultramem-80" = "Automatic" + "m1-ultramem-160" = "Automatic" + "m2-ultramem-208" = "Automatic" + "m2-megamem-416" = "Automatic" + "m2-hypermem-416" = "Automatic" + "m2-ultramem-416" = "Automatic" + "m3-megamem-64" = "Automatic" + "m3-megamem-128" = "Automatic" + "m3-ultramem-32" = "Automatic" + "m3-ultramem-64" = "Automatic" + "m3-ultramem-128" = "Automatic" + "c3-standard-44" = "Automatic" + "c3-highmem-44" = "Automatic" + "c3-highmem-88" = "Automatic" + "c3-highmem-176" = "Automatic" + "c3-standard-192-metal" = "Automatic" + "c3-highcpu-192-metal" = "Automatic" + "c3-highmem-192-metal" = "Automatic" + "x4-megamem-960-metal" = "Automatic" + "x4-megamem-1440-metal" = "Automatic" + "x4-megamem-1920-metal" = "Automatic" + } + + native_bm = length(regexall("metal", var.machine_type)) > 0 + + # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this. + # All 'over provisioned' capacity is to go onto the data disk. + final_disk_type = var.disk_type == "" ? (local.native_bm ? "hyperdisk-extreme" : "pd-ssd") : var.disk_type + min_total_disk_map = { + "pd-ssd" = 550 + "pd-balanced" = 943 + "pd-extreme" = 0 + "hyperdisk-balanced" = 0 + "hyperdisk-extreme" = 0 + } + + min_total_disk = local.min_total_disk_map[local.final_disk_type] + + mem_size = lookup(local.mem_size_map, var.machine_type, 320) + hana_log_size = ceil(min(512, max(64, local.mem_size / 2))) + hana_data_size_min = ceil(local.mem_size * 12 / 10) + + hana_data_size = max(local.hana_data_size_min, local.min_total_disk - local.hana_log_size) + pd_size = ceil(max(local.min_total_disk, local.hana_log_size + local.hana_data_size_min + 1)) + + unified_pd_size = var.unified_disk_size_override == null ? ceil(local.pd_size) : var.unified_disk_size_override + data_pd_size = var.data_disk_size_override == null ? local.hana_data_size : var.data_disk_size_override + log_pd_size = var.log_disk_size_override == null ? local.hana_log_size : var.log_disk_size_override + + # Disk types + final_data_disk_type = var.data_disk_type_override == "" ? local.final_disk_type : var.data_disk_type_override + final_log_disk_type = var.log_disk_type_override == "" ? local.final_disk_type : var.log_disk_type_override + + # Disk IOPS + hdx_iops_map = { + "data" = max(10000, local.data_pd_size * 2) + "log" = max(10000, local.log_pd_size * 2) + "shared" = null + "usrsap" = null + "unified" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + "worker" = max(10000, local.data_pd_size * 2) + max(10000, local.log_pd_size * 2) + } + hdb_iops_map = { + "data" = var.hyperdisk_balanced_iops_default + "log" = var.hyperdisk_balanced_iops_default + "shared" = null + "usrsap" = null + "unified" = var.hyperdisk_balanced_iops_default + "worker" = var.hyperdisk_balanced_iops_default + } + null_iops_map = { + "data" = null + "log" = null + "shared" = null + "usrsap" = null + "unified" = null + "worker" = null + } + iops_map = { + "pd-ssd" = local.null_iops_map + "pd-balanced" = local.null_iops_map + "pd-extreme" = local.hdx_iops_map + "hyperdisk-balanced" = local.hdb_iops_map + "hyperdisk-extreme" = local.hdx_iops_map + } + + final_data_iops = var.data_disk_iops_override == null ? local.iops_map[local.final_data_disk_type]["data"] : var.data_disk_iops_override + final_log_iops = var.log_disk_iops_override == null ? 
local.iops_map[local.final_log_disk_type]["log"] : var.log_disk_iops_override + final_unified_iops = var.unified_disk_iops_override == null ? local.iops_map[local.final_disk_type]["unified"] : var.unified_disk_iops_override + + # Disk throughput MB/s + hdb_throughput_map = { + "data" = var.hyperdisk_balanced_throughput_default + "log" = var.hyperdisk_balanced_throughput_default + "unified" = var.hyperdisk_balanced_throughput_default + } + null_throughput_map = { + "data" = null + "log" = null + "unified" = null + } + throughput_map = { + "pd-ssd" = local.null_throughput_map + "pd-balanced" = local.null_throughput_map + "pd-extreme" = local.null_throughput_map + "hyperdisk-balanced" = local.hdb_throughput_map + "hyperdisk-extreme" = local.null_throughput_map + } + + final_data_throughput = var.data_disk_throughput_override == null ? local.throughput_map[local.final_data_disk_type]["data"] : var.data_disk_throughput_override + final_log_throughput = var.log_disk_throughput_override == null ? local.throughput_map[local.final_log_disk_type]["log"] : var.log_disk_throughput_override + final_unified_throughput = var.unified_disk_throughput_override == null ? local.throughput_map[local.final_disk_type]["unified"] : var.unified_disk_throughput_override + + primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url + secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url zone_split = split("-", var.zone) region = "${local.zone_split[0]}-${local.zone_split[1]}" @@ -96,12 +201,15 @@ locals { subnetwork_uri = length(local.subnetwork_split) > 1 ? ( "projects/${local.subnetwork_split[0]}/regions/${local.region}/subnetworks/${local.subnetwork_split[1]}") : ( "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}") +} - pdssd_size = ceil(max(834, local.sap_hana_log_size + local.sap_hana_data_size + 1)) - primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url - secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url +# tflint-ignore: terraform_unused_declarations +data "assert_test" "hyperdisk_with_native_bm" { + test = local.native_bm ? length(regexall("hyperdisk", local.final_disk_type)) > 0 : true + throw = "Native bare metal machines only work with hyperdisks. Set 'disk_type' accordingly, e.g. 'disk_type = hyperdisk-balanced'" } + ################################################################################ # disks ################################################################################ @@ -109,7 +217,7 @@ resource "google_compute_disk" "sap_hana_scaleout_boot_disks" { # Need a disk for primary, worker nodes, standby nodes count = var.sap_hana_worker_nodes + var.sap_hana_standby_nodes + 1 name = count.index == 0 ? "${var.instance_name}-boot" : "${var.instance_name}w${count.index}-boot" - type = "pd-standard" + type = local.native_bm ? 
"hyperdisk-balanced" : "pd-balanced" zone = var.zone size = 45 project = var.project_id @@ -123,14 +231,38 @@ resource "google_compute_disk" "sap_hana_scaleout_boot_disks" { } } -resource "google_compute_disk" "sap_hana_scaleout_pd_disks" { +resource "google_compute_disk" "sap_hana_scaleout_disks" { # Need a pd disk for primary, worker nodes - count = var.sap_hana_worker_nodes + 1 - name = format("${var.instance_name}-mnt%05d", count.index + 1) - type = "pd-ssd" - zone = var.zone - size = local.pdssd_size - project = var.project_id + count = var.use_single_data_log_disk ? var.sap_hana_worker_nodes + 1 : 0 + name = format("${var.instance_name}-hana%05d", count.index + 1) + type = local.final_disk_type + zone = var.zone + size = local.unified_pd_size + project = var.project_id + provisioned_iops = local.final_unified_iops + provisioned_throughput = local.final_unified_throughput +} + +resource "google_compute_disk" "sap_hana_data_disks" { + count = var.use_single_data_log_disk ? 0 : var.sap_hana_worker_nodes + 1 + name = format("${var.instance_name}-data%05d", count.index + 1) + type = local.final_data_disk_type + zone = var.zone + size = local.data_pd_size + project = var.project_id + provisioned_iops = local.final_data_iops + provisioned_throughput = local.final_data_throughput +} + +resource "google_compute_disk" "sap_hana_log_disks" { + count = var.use_single_data_log_disk ? 0 : var.sap_hana_worker_nodes + 1 + name = format("${var.instance_name}-log%05d", count.index + 1) + type = local.final_log_disk_type + zone = var.zone + size = local.log_pd_size + project = var.project_id + provisioned_iops = local.final_log_iops + provisioned_throughput = local.final_log_throughput } ################################################################################ @@ -143,6 +275,7 @@ resource "google_compute_address" "sap_hana_vm_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = var.vm_static_ip } resource "google_compute_address" "sap_hana_worker_ip" { count = var.sap_hana_worker_nodes @@ -151,6 +284,7 @@ resource "google_compute_address" "sap_hana_worker_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = length(var.worker_static_ips) > count.index ? var.worker_static_ips[count.index] : "" } resource "google_compute_address" "sap_hana_standby_ip" { count = var.sap_hana_standby_nodes @@ -159,6 +293,7 @@ resource "google_compute_address" "sap_hana_standby_ip" { address_type = "INTERNAL" region = local.region project = var.project_id + address = length(var.standby_static_ips) > count.index ? var.standby_static_ips[count.index] : "" } ################################################################################ @@ -179,17 +314,41 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { source = google_compute_disk.sap_hana_scaleout_boot_disks[0].self_link } - attached_disk { - # we only attach the PDs to the primary and workers - device_name = google_compute_disk.sap_hana_scaleout_pd_disks[0].name - source = google_compute_disk.sap_hana_scaleout_pd_disks[0].self_link + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } } + dynamic "attached_disk" { + for_each = var.use_single_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_scaleout_disks[0].name + source = google_compute_disk.sap_hana_scaleout_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_data_log_disk ? 
[] : [1] + content { + device_name = google_compute_disk.sap_hana_data_disks[0].name + source = google_compute_disk.sap_hana_data_disks[0].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_log_disks[0].name + source = google_compute_disk.sap_hana_log_disks[0].self_link + } + } can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_vm_ip.address + nic_type = var.nic_type == "" ? null : var.nic_type + # we only include access_config if public_ip is true, an empty access_config # will create an ephemeral public ip dynamic "access_config" { @@ -227,10 +386,10 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { sap_hana_deployment_bucket = var.sap_hana_deployment_bucket sap_hana_original_role = "master" sap_hana_sid = var.sap_hana_sid + sap_hana_sapsys_gid = var.sap_hana_sapsys_gid sap_hana_instance_number = var.sap_hana_instance_number sap_hana_sidadm_password = var.sap_hana_sidadm_password sap_hana_sidadm_password_secret = var.sap_hana_sidadm_password_secret - # wording on system_password may be inconsitent with DM sap_hana_system_password = var.sap_hana_system_password sap_hana_system_password_secret = var.sap_hana_system_password_secret sap_hana_sidadm_uid = var.sap_hana_sidadm_uid @@ -239,6 +398,9 @@ resource "google_compute_instance" "sap_hana_scaleout_primary_instance" { sap_hana_standby_nodes = var.sap_hana_standby_nodes sap_hana_shared_nfs = var.sap_hana_shared_nfs sap_hana_backup_nfs = var.sap_hana_backup_nfs + use_single_data_log_disk = var.use_single_data_log_disk + sap_hana_data_disk_type = local.final_data_disk_type + native_bm = local.native_bm template-type = "TERRAFORM" } @@ -264,17 +426,40 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" { source = google_compute_disk.sap_hana_scaleout_boot_disks[count.index + 1].self_link } - attached_disk { - # we only attach the PDs to the primary and workers - device_name = google_compute_disk.sap_hana_scaleout_pd_disks[count.index + 1].name - source = google_compute_disk.sap_hana_scaleout_pd_disks[count.index + 1].self_link + dynamic "scheduling" { + for_each = local.native_bm ? [1] : [] + content { + on_host_maintenance = "TERMINATE" + } } + dynamic "attached_disk" { + for_each = var.use_single_data_log_disk ? [1] : [] + content { + device_name = google_compute_disk.sap_hana_scaleout_disks[count.index + 1].name + source = google_compute_disk.sap_hana_scaleout_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_data_disks[count.index + 1].name + source = google_compute_disk.sap_hana_data_disks[count.index + 1].self_link + } + } + dynamic "attached_disk" { + for_each = var.use_single_data_log_disk ? [] : [1] + content { + device_name = google_compute_disk.sap_hana_log_disks[count.index + 1].name + source = google_compute_disk.sap_hana_log_disks[count.index + 1].self_link + } + } can_ip_forward = var.can_ip_forward network_interface { subnetwork = local.subnetwork_uri network_ip = google_compute_address.sap_hana_worker_ip[count.index].address + nic_type = var.nic_type == "" ? 
null : var.nic_type
# we only include access_config if public_ip is true, an empty access_config
# will create an ephemeral public ip
dynamic "access_config" {
@@ -311,6 +496,7 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" {
sap_deployment_debug = var.sap_deployment_debug
sap_hana_deployment_bucket = var.sap_hana_deployment_bucket
sap_hana_sid = var.sap_hana_sid
+ sap_hana_sapsys_gid = var.sap_hana_sapsys_gid
sap_hana_instance_number = var.sap_hana_instance_number
sap_hana_scaleout_nodes = var.sap_hana_worker_nodes + var.sap_hana_standby_nodes
@@ -324,6 +510,8 @@ resource "google_compute_instance" "sap_hana_scaleout_worker_instances" {
sap_hana_backup_nfs = var.sap_hana_backup_nfs
sap_hana_worker_nodes = var.sap_hana_worker_nodes
sap_hana_standby_nodes = var.sap_hana_standby_nodes
+ use_single_data_log_disk = var.use_single_data_log_disk
+ native_bm = local.native_bm
template-type = "TERRAFORM"
}
@@ -353,12 +541,19 @@ resource "google_compute_instance" "sap_hana_scaleout_standby_instances" {
source = google_compute_disk.sap_hana_scaleout_boot_disks[count.index + var.sap_hana_worker_nodes + 1].self_link
}
+ dynamic "scheduling" {
+ for_each = local.native_bm ? [1] : []
+ content {
+ on_host_maintenance = "TERMINATE"
+ }
+ }
can_ip_forward = var.can_ip_forward
network_interface {
subnetwork = local.subnetwork_uri
network_ip = google_compute_address.sap_hana_standby_ip[count.index].address
+ nic_type = var.nic_type == "" ? null : var.nic_type
# we only include access_config if public_ip is true, an empty access_config
# will create an ephemeral public ip
dynamic "access_config" {
@@ -395,6 +590,7 @@ resource "google_compute_instance" "sap_hana_scaleout_standby_instances" {
sap_deployment_debug = var.sap_deployment_debug
sap_hana_deployment_bucket = var.sap_hana_deployment_bucket
sap_hana_sid = var.sap_hana_sid
+ sap_hana_sapsys_gid = var.sap_hana_sapsys_gid
sap_hana_instance_number = var.sap_hana_instance_number
sap_hana_scaleout_nodes = var.sap_hana_worker_nodes + var.sap_hana_standby_nodes
@@ -404,10 +600,11 @@ resource "google_compute_instance" "sap_hana_scaleout_standby_instances" {
sap_hana_system_password = var.sap_hana_system_password
sap_hana_system_password_secret = var.sap_hana_system_password_secret
sap_hana_sidadm_uid = var.sap_hana_sidadm_uid
sap_hana_shared_nfs = var.sap_hana_shared_nfs
sap_hana_backup_nfs = var.sap_hana_backup_nfs
sap_hana_worker_nodes = var.sap_hana_worker_nodes
sap_hana_standby_nodes = var.sap_hana_standby_nodes
+ native_bm = local.native_bm
template-type = "TERRAFORM"
}
diff --git a/modules/sap_hana_scaleout/outputs.tf b/modules/sap_hana_scaleout/outputs.tf index abf07652..e3d94e4f 100644 --- a/modules/sap_hana_scaleout/outputs.tf +++ b/modules/sap_hana_scaleout/outputs.tf
@@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
output "sap_hana_primary_self_link" {
description = "Self-link for the primary SAP HANA Scaleout instance created."
value = google_compute_instance.sap_hana_scaleout_primary_instance.self_link
diff --git a/modules/sap_hana_scaleout/variables.tf b/modules/sap_hana_scaleout/variables.tf index d8c55f23..f6e73844 100644 --- a/modules/sap_hana_scaleout/variables.tf +++ b/modules/sap_hana_scaleout/variables.tf
@@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
variable "project_id" {
type = string
description = "Project id where the instances will be created."
@@ -64,8 +63,12 @@ variable "sap_hana_sid" {
variable "sap_hana_deployment_bucket" {
type = string
- description = "The GCS bucket containing the SAP HANA media. If this is not defined, the GCE instance will be provisioned without SAP HANA installed."
- default = ""
+ description = "The Cloud Storage path that contains the SAP HANA media. Do not include gs://. If this is not defined, the GCE instance will be provisioned without SAP HANA installed."
+ validation {
+ condition = (!(length(regexall("gs:", var.sap_hana_deployment_bucket)) > 0))
+ error_message = "The sap_hana_deployment_bucket must only contain the Cloud Storage path, which includes the bucket name and the names of any folders. Do not include gs://."
+ }
+ default = ""
}
variable "sap_hana_instance_number" {
@@ -142,12 +145,20 @@ variable "sap_hana_standby_nodes" {
}
variable "sap_hana_shared_nfs" {
- type = string
+ type = string
+ validation {
+ condition = var.sap_hana_shared_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_shared_nfs))
+ error_message = "The sap_hana_shared_nfs must be an IP address followed by ':/' and a share path."
+ }
description = "Google Filestore share for /hana/shared"
}
variable "sap_hana_backup_nfs" {
- type = string
+ type = string
+ validation {
+ condition = var.sap_hana_backup_nfs == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.sap_hana_backup_nfs))
+ error_message = "The sap_hana_backup_nfs must be an IP address followed by ':/' and a share path."
+ }
description = "Google Filestore share for /hanabackup"
}
@@ -157,6 +168,12 @@ variable "sap_hana_sidadm_uid" {
default = 900
}
+variable "sap_hana_sapsys_gid" {
+ type = number
+ description = "The Linux GID of the SAPSYS group. By default this is set to 79."
+ default = 79
+}
+
variable "network_tags" {
type = list(string)
description = "OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes."
default = []
@@ -206,18 +223,153 @@ variable "post_deployment_script" {
default = ""
}
+variable "nic_type" {
+ type = string
+ description = "Optional - This value determines the type of NIC to use; valid options are GVNIC and VIRTIO_NET. If choosing GVNIC, make sure that it is supported by your OS choice; see https://cloud.google.com/compute/docs/images/os-details#networking."
+ validation {
+ condition = contains(["VIRTIO_NET", "GVNIC", ""], var.nic_type)
+ error_message = "The nic_type must be either GVNIC or VIRTIO_NET."
+ }
+ default = ""
+}
+
+variable "disk_type" {
+ type = string
+ description = "Optional - The default disk type to use on all disks deployed. Extreme disks are not supported on all machine types. See https://cloud.google.com/compute/docs/disks/ for details."
+ validation {
+ condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme"], var.disk_type)
+ error_message = "The disk_type must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme."
+ }
+ default = "pd-ssd"
+}
+
+variable "use_single_data_log_disk" {
+ type = bool
+ description = "Optional - By default, two separate disks for data and logs will be created. If set to true, a single disk will be used instead."
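+  # With the default (false), data and log each get their own disk, so disk type,
+  # size, IOPS, and throughput can be tuned per volume via the *_override variables below.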
+ default = false +} + +variable "hyperdisk_balanced_iops_default" { + type = number + description = "Optional - default is 3000. Number of IOPS that is set for each disk of type Hyperdisk-balanced (except for boot disk)." + default = 3000 +} + +variable "hyperdisk_balanced_throughput_default" { + type = number + description = "Optional - default is 750. Throughput in MB/s that is set for each disk of type Hyperdisk-balanced (except for boot disk)." + default = 750 +} + +variable "vm_static_ip" { + type = string + description = "Optional - Defines an internal static IP for the VM." + validation { + condition = var.vm_static_ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", var.vm_static_ip)) + error_message = "The vm_static_ip must be a valid IP address." + } + default = "" +} + +variable "worker_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the worker nodes." + validation { + condition = alltrue([ + for ip in var.worker_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All worker_static_ips must be valid IP addresses." + } + default = [] +} + +variable "standby_static_ips" { + type = list(string) + description = "Optional - Defines internal static IP addresses for the standby nodes." + validation { + condition = alltrue([ + for ip in var.standby_static_ips : ip == "" || can(regex("^(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$", ip)) + ]) + error_message = "All standby_static_ips must be valid IP addresses." + } + default = [] +} + # -# DO NOT MODIFY unless you know what you are doing +# DO NOT MODIFY unless instructed or aware of the implications of using those settings # +variable "data_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the data disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.data_disk_type_override) + error_message = "The data_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "log_disk_type_override" { + type = string + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Override the 'default_disk_type' for the log disk." + validation { + condition = contains(["pd-ssd", "pd-balanced", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme", ""], var.log_disk_type_override) + error_message = "The log_disk_type_override must be either pd-ssd, pd-balanced, pd-extreme, hyperdisk-balanced, or hyperdisk-extreme." + } + default = "" +} +variable "unified_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the primary's unified disk, that is based off of the machine_type." + default = null +} +variable "data_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the data disk(s), that is based off of the machine_type." 
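+  # When left null, the data disk size is derived from the machine type's memory
+  # (see hana_data_size in main.tf).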
+ default = null +} +variable "log_disk_size_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Overrides the default size for the log disk(s), that is based off of the machine_type." + default = null +} +variable "unified_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_iops_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the number of IOPS that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "unified_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the primary's unified disk will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "data_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the data disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} +variable "log_disk_throughput_override" { + type = number + description = "Warning, do not use unless instructed or aware of the implications of using this setting. Directly sets the throughput in MB/s that the log disk(s) will use. Has no effect if not using a disk type that supports it." + default = null +} variable "primary_startup_url" { type = string description = "Startup script to be executed when the VM boots, should not be overridden." - default = "curl -s BUILD.TERRA_SH_URL/sap_hana_scaleout/startup.sh | bash -s BUILD.TERRA_SH_URL" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_scaleout/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" } variable "secondary_startup_url" { type = string - default = "curl -s BUILD.TERRA_SH_URL/sap_hana_scaleout/startup_secondary.sh | bash -s BUILD.TERRA_SH_URL" + default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_hana_scaleout/startup_secondary.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform" description = "DO NOT USE" } @@ -226,3 +378,4 @@ variable "can_ip_forward" { description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed." 
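+  # Note: canIpForward can only be set when the VM is created; changing it later forces instance replacement.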
default = true
}
+
diff --git a/modules/sap_hana_scaleout/versions.tf b/modules/sap_hana_scaleout/versions.tf index d902e9ce..12cc1123 100644 --- a/modules/sap_hana_scaleout/versions.tf +++ b/modules/sap_hana_scaleout/versions.tf
@@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
terraform {
required_version = ">=0.12.6"
required_providers {
@@ -21,8 +20,11 @@ terraform {
source = "hashicorp/google"
version = ">= 4.0.0, < 6"
}
+ assert = {
+ source = "bwoznicki/assert"
+ version = "0.0.1"
+ }
}
-
provider_meta "google" {
module_name = "blueprints/terraform/terraform-google-sap:sap_hana_scaleout/v1.1.2"
}
diff --git a/modules/sap_nw/README.md b/modules/sap_nw/README.md new file mode 100644 index 00000000..a9c7ca4b --- /dev/null +++ b/modules/sap_nw/README.md
@@ -0,0 +1,82 @@
+# Terraform for SAP NW for Google Cloud
+This template follows the documented steps at https://cloud.google.com/solutions/sap/docs/netweaver-deployment-guide-linux and deploys the GCP resources needed up to the point where the SAP software can be installed.
+
+## Set up Terraform
+
+Install Terraform on the machine you would like to use to deploy from by following https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform
+
+## How to deploy
+
+1. Download the .tf file into an empty directory
+`curl https://storage.googleapis.com/cloudsapdeploy/terraform/latest/terraform/sap_nw/terraform/sap_nw.tf -o sap_nw.tf`
+
+2. Fill in the mandatory variables and, if desired, any optional variables in the .tf file.
+
+3. Deploy
+ 1. Run `terraform init` (only needed once)
+ 2. Run `terraform plan` to see what is going to be deployed. Verify if names, zones, sizes, etc. are as desired.
+ 3. Run `terraform apply` to deploy the resources
+ 4. Run `terraform destroy` to remove the resources
+
+4. Continue the installation of the SAP software as per the documentation at https://cloud.google.com/solutions/sap/docs/netweaver-deployment-guide-linux
+
+## Additional information
+
+For additional information see https://www.terraform.io/docs/index.html and https://cloud.google.com/docs/terraform
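+
+## Usage example
+
+A minimal sketch of how this module can be consumed (the source path, project, zone, and image values below are illustrative placeholders; see the Inputs table for the full set of variables):
+
+```hcl
+module "sap_nw" {
+  source = "terraform-google-modules/sap/google//modules/sap_nw"
+
+  project_id          = "my-project"      # placeholder project
+  zone                = "us-central1-b"   # placeholder zone
+  machine_type        = "n2-highmem-32"
+  subnetwork          = "default"
+  linux_image         = "rhel-8-4-sap-ha" # any supported SAP image/project pair works
+  linux_image_project = "rhel-sap-cloud"
+  instance_name       = "nw-instance"
+}
+```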
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no |
+| instance\_name | Hostname of the GCE instance. | `string` | n/a | yes |
+| linux\_image | Linux image name to use. | `string` | n/a | yes |
+| linux\_image\_project | The project which the Linux image belongs to. | `string` | n/a | yes |
+| machine\_type | Machine type for the instances. | `string` | n/a | yes |
+| network\_tags | OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes. | `list(string)` | `[]` | no |
+| post\_deployment\_script | OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment. | `string` | `""` | no |
+| primary\_startup\_url | Startup script to be executed when the VM boots, should not be overridden. | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no |
+| project\_id | Project id where the instances will be created. | `string` | n/a | yes |
+| public\_ip | OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail. | `bool` | `true` | no |
+| reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform | `string` | `""` | no |
+| sap\_deployment\_debug | OPTIONAL - If this value is set to true, the deployment will generate verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging. | `bool` | `false` | no |
+| sap\_mnt\_size | Size of /sapmnt in GB | `number` | `8` | no |
+| service\_account | OPTIONAL - Ability to define a custom service account instead of using the default project service account. | `string` | `""` | no |
+| subnetwork | The sub network to deploy the instance in. | `string` | n/a | yes |
+| swap\_size | Size in GB of swap volume | `number` | `8` | no |
+| usr\_sap\_size | Size of /usr/sap in GB | `number` | `8` | no |
+| zone | Zone where the instances will be created. | `string` | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| sap\_nw\_self\_link | SAP NW self-link for instance created |
+
+
+
+## Requirements
+
+These sections describe requirements for using this module.
+
+### Software
+
+The following dependencies must be available:
+
+- [Terraform][terraform] v0.13
+- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0
+
+## Contributing
+
+Refer to the [contribution guidelines](./CONTRIBUTING.md) for
+information on contributing to this module.
+
+[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google
+[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google
+[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html
+[terraform]: https://www.terraform.io/downloads.html
+
+## Security Disclosures
+
+Please see our [security disclosure process](./SECURITY.md).
diff --git a/modules/sap_nw/main.tf b/modules/sap_nw/main.tf new file mode 100644 index 00000000..9e0eebda --- /dev/null +++ b/modules/sap_nw/main.tf
@@ -0,0 +1,185 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#
+# Terraform SAP NW for Google Cloud
+#
+# Version: 2.0.202404101403
+# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc
+#
+
+################################################################################
+# Local variables
+################################################################################
+locals {
+ zone_split = split("-", var.zone)
+ region = "${local.zone_split[0]}-${local.zone_split[1]}"
+ subnetwork_split = split("/", var.subnetwork)
+ subnetwork_uri = length(local.subnetwork_split) > 1 ? (
+ "projects/${local.subnetwork_split[0]}/regions/${local.region}/subnetworks/${local.subnetwork_split[1]}") : (
+ "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}")
+
+
+ cpu_map = {
+ "n1-highmem-96" : "Intel Skylake",
+ "n1-megamem-96" : "Intel Skylake",
+ }
+ primary_startup_url = var.sap_deployment_debug ?
replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url +} + +################################################################################ +# disks +################################################################################ +resource "google_compute_disk" "sap_nw_boot_disk" { + name = "${var.instance_name}-boot" + type = "pd-balanced" + zone = var.zone + size = 30 # GB + project = var.project_id + image = "${var.linux_image_project}/${var.linux_image}" + + lifecycle { + # Ignores newer versions of the OS image. Removing this lifecycle + # and re-applying will cause the current disk to be deleted. + # All existing data will be lost. + ignore_changes = [image] + } +} + +resource "google_compute_disk" "sap_nw_usrsap_disk" { + count = var.usr_sap_size > 0 ? 1 : 0 + name = "${var.instance_name}-usrsap" + type = "pd-balanced" + zone = var.zone + size = var.usr_sap_size + project = var.project_id +} + +resource "google_compute_disk" "sap_nw_swap_disk" { + count = var.swap_size > 0 ? 1 : 0 + name = "${var.instance_name}-swap" + type = "pd-balanced" + zone = var.zone + size = var.swap_size + project = var.project_id +} + +resource "google_compute_disk" "sap_nw_sapmnt_disk" { + count = var.sap_mnt_size > 0 ? 1 : 0 + name = "${var.instance_name}-sapmnt" + type = "pd-balanced" + size = var.sap_mnt_size + zone = var.zone + project = var.project_id +} + +################################################################################ +# VIPs +################################################################################ +resource "google_compute_address" "sap_nw_vm_ip" { + name = var.instance_name + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + region = local.region + project = var.project_id +} +################################################################################ +# instances +################################################################################ +resource "google_compute_instance" "sap_nw_instance" { + name = var.instance_name + machine_type = var.machine_type + zone = var.zone + project = var.project_id + min_cpu_platform = lookup(local.cpu_map, var.machine_type, "Automatic") + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.sap_nw_boot_disk.self_link + } + + dynamic "attached_disk" { + for_each = var.usr_sap_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_nw_usrsap_disk[0].name + source = google_compute_disk.sap_nw_usrsap_disk[0].self_link + } + } + + dynamic "attached_disk" { + for_each = var.sap_mnt_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_nw_sapmnt_disk[0].name + source = google_compute_disk.sap_nw_sapmnt_disk[0].self_link + } + } + + dynamic "attached_disk" { + for_each = var.swap_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.sap_nw_swap_disk[0].name + source = google_compute_disk.sap_nw_swap_disk[0].self_link + } + } + + can_ip_forward = var.can_ip_forward + + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_nw_vm_ip.address + + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? 
[1] : [] + content { + } + } + } + + tags = var.network_tags + + service_account { + # An empty string service account will default to the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + + dynamic "reservation_affinity" { + for_each = length(var.reservation_name) > 1 ? [1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.reservation_name] + } + } + } + + metadata = { + startup-script = local.primary_startup_url + post_deployment_script = var.post_deployment_script + sap_deployment_debug = var.sap_deployment_debug + template-type = "TERRAFORM" + } + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} diff --git a/modules/sap_nw/outputs.tf b/modules/sap_nw/outputs.tf new file mode 100644 index 00000000..4a939a43 --- /dev/null +++ b/modules/sap_nw/outputs.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +output "sap_nw_self_link" { + description = "SAP NW self-link for instance created" + value = google_compute_instance.sap_nw_instance.self_link +} diff --git a/modules/sap_nw/variables.tf b/modules/sap_nw/variables.tf new file mode 100644 index 00000000..d68b508d --- /dev/null +++ b/modules/sap_nw/variables.tf @@ -0,0 +1,147 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +variable "project_id" { + type = string + description = "Project id where the instances will be created." +} + +variable "zone" { + type = string + description = "Zone where the instances will be created." +} + +variable "machine_type" { + type = string + description = "Machine type for the instances." +} + +variable "subnetwork" { + type = string + description = "The sub network to deploy the instance in." +} + +variable "linux_image" { + type = string + description = "Linux image name to use." +} + +variable "linux_image_project" { + type = string + description = "The project which the Linux image belongs to." +} + +variable "instance_name" { + type = string + description = "Hostname of the GCE instance." + validation { + condition = can(regex("^[a-z0-9\\-]+$", var.instance_name)) + error_message = "The instance_name must consist of lowercase letters (a-z), numbers, and hyphens." 
+ }
+}
+
+variable "usr_sap_size" {
+ type = number
+ description = "Size of /usr/sap in GB"
+ default = 8
+ validation {
+ condition = var.usr_sap_size >= 8
+ error_message = "Size of /usr/sap must be at least 8 GB."
+ }
+}
+
+variable "sap_mnt_size" {
+ type = number
+ description = "Size of /sapmnt in GB"
+ default = 8
+ validation {
+ condition = var.sap_mnt_size >= 8
+ error_message = "Size of /sapmnt must be at least 8 GB."
+ }
+}
+
+variable "swap_size" {
+ type = number
+ description = "Size in GB of swap volume"
+ default = 8
+ validation {
+ condition = var.swap_size >= 8
+ error_message = "Size of swap must be at least 8 GB."
+ }
+}
+
+variable "network_tags" {
+ type = list(string)
+ description = "OPTIONAL - Network tags can be associated to your instance on deployment. This can be used for firewalling or routing purposes."
+ default = []
+}
+
+variable "public_ip" {
+ type = bool
+ description = "OPTIONAL - Defines whether a public IP address should be added to your VM. By default this is set to Yes. Note that if you set this to No without appropriate network nat and tags in place, there will be no route to the internet and thus the installation will fail."
+ default = true
+}
+
+variable "service_account" {
+ type = string
+ description = "OPTIONAL - Ability to define a custom service account instead of using the default project service account."
+ default = ""
+}
+
+variable "sap_deployment_debug" {
+ type = bool
+ description = "OPTIONAL - If this value is set to true, the deployment will generate verbose deployment logs. Only turn this setting on if a Google support engineer asks you to enable debugging."
+ default = false
+}
+
+variable "reservation_name" {
+ type = string
+ description = <<-EOT
+ Use a reservation specified by RESERVATION_NAME.
+ By default ANY_RESERVATION is used when this variable is empty.
+ In order for a reservation to be used it must be created with the
+ "Select specific reservation" selected (specificReservationRequired set to true)
+ Be sure to create your reservation with the correct Min CPU Platform for the
+ following instance types:
+ n1-highmem-32 : Intel Broadwell
+ n1-highmem-64 : Intel Broadwell
+ n1-highmem-96 : Intel Skylake
+ n1-megamem-96 : Intel Skylake
+ m1-megamem-96 : Intel Skylake
+ All other instance types can have automatic Min CPU Platform
+ EOT
+ default = ""
+}
+
+variable "post_deployment_script" {
+ type = string
+ description = "OPTIONAL - gs:// or https:// location of a script to execute on the created VM's post deployment."
+ default = ""
+}
+
+#
+# DO NOT MODIFY unless you know what you are doing
+#
+variable "primary_startup_url" {
+ type = string
+ description = "Startup script to be executed when the VM boots, should not be overridden."
+ default = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw/startup.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"
+}
+
+variable "can_ip_forward" {
+ type = bool
+ description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed."
+ default = true
+}
diff --git a/modules/sap_nw/versions.tf b/modules/sap_nw/versions.tf new file mode 100644 index 00000000..fb459560 --- /dev/null +++ b/modules/sap_nw/versions.tf
@@ -0,0 +1,24 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+terraform {
+ required_version = ">=0.12.6"
+ required_providers {
+ google = {
+ source = "hashicorp/google"
+ version = ">= 4.0.0, < 6"
+ }
+ }
+}
diff --git a/modules/sap_nw_ha/README.md b/modules/sap_nw_ha/README.md new file mode 100644 index 00000000..1696ea39 --- /dev/null +++ b/modules/sap_nw_ha/README.md
@@ -0,0 +1,116 @@
+# Terraform for SAP NW HA for Google Cloud
+This template follows the documented steps at https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles and https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-rhel and deploys the GCP and Pacemaker resources up to the installation of SAP's central services.
+
+## Set up Terraform
+
+Install Terraform on the machine you would like to use to deploy from by following https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started#install-terraform
+
+## How to deploy
+
+1. Download the .tf file into an empty directory
+`curl https://storage.googleapis.com/cloudsapdeploy/deploymentmanager/latest/dm-templates/sap_nw_ha/terraform/sap_nw_ha.tf -o sap_nw_ha.tf`
+
+2. Fill in the mandatory variables and, if desired, any optional variables in the .tf file.
+
+3. Deploy
+ 1. Run `terraform init` (only needed once)
+ 2. Run `terraform plan` to see what is going to be deployed. Verify if names, zones, sizes, etc. are as desired.
+ 3. Run `terraform apply` to deploy the resources
+ 4. Run `terraform destroy` to remove the resources
+
+4. Continue the installation of the SAP software and the setup of the remaining cluster resources as per the documentation at https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles or https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-rhel
+
+## Additional information
+
+For additional information see https://www.terraform.io/docs/index.html and https://cloud.google.com/docs/terraform
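+
+## Usage example
+
+A minimal sketch with only the required inputs (all values, including the source path, are illustrative placeholders; see the Inputs table for the full set of variables and the optional SCS/ERS overrides):
+
+```hcl
+module "sap_nw_ha" {
+  source = "terraform-google-modules/sap/google//modules/sap_nw_ha"
+
+  project_id             = "my-project"         # placeholder project
+  machine_type           = "n2-highmem-32"
+  network                = "default"
+  subnetwork             = "default"
+  linux_image            = "sles-15-sp2-sap"    # any supported SAP HA image/project pair works
+  linux_image_project    = "suse-sap-cloud"
+  sap_sid                = "PE1"
+  nfs_path               = "10.163.58.114:/ssd" # format <IP>:/<export>, as in the nfs_path input
+  sap_primary_instance   = "prd-nw1"            # initial SCS location
+  sap_primary_zone       = "us-central1-b"
+  sap_secondary_instance = "prd-nw2"            # initial ERS location
+  sap_secondary_zone     = "us-central1-c"
+}
+```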
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| can\_ip\_forward | Whether sending and receiving of packets with non-matching source or destination IPs is allowed. | `bool` | `true` | no |
+| ers\_backend\_svc\_name | Name of ERS backend service | `string` | `""` | no |
+| ers\_forw\_rule\_name | Name of ERS forwarding rule | `string` | `""` | no |
+| ers\_hc\_name | Name of ERS health check | `string` | `""` | no |
+| ers\_hc\_port | Port of ERS health check | `string` | `""` | no |
+| ers\_inst\_group\_name | Name of ERS instance group | `string` | `""` | no |
+| ers\_vip\_address | Address of ERS virtual IP | `string` | `""` | no |
+| ers\_vip\_name | Name of ERS virtual IP | `string` | `""` | no |
+| hc\_firewall\_rule\_name | Name of firewall rule for the health check | `string` | `""` | no |
+| hc\_network\_tag | Network tag for the health check firewall rule | `list(string)` | `[]` | no |
+| linux\_image | Linux image name | `string` | n/a | yes |
+| linux\_image\_project | Linux image project | `string` | n/a | yes |
+| machine\_type | Machine type for the instances | `string` | n/a | yes |
+| network | Network for the instances | `string` | n/a | yes |
+| network\_tags | Network tags to apply to the instances | `list(string)` | `[]` | no |
+| nfs\_path | NFS path for shared file system, e.g. 10.163.58.114:/ssd | `string` | n/a | yes |
+| pacemaker\_cluster\_name | Name of Pacemaker cluster. | `string` | `""` | no |
+| post\_deployment\_script | Specifies the location of a script to run after the deployment is complete.
The script should be hosted on a web server or in a GCS bucket. The URL should
begin with http://, https://, or gs://. Note that this script will be executed
on all VMs that the template creates. If you only want to run it on the master
instance, you will need to add a check at the top of your script. | `string` | `""` | no | +| primary\_reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform | `string` | `""` | no |
+| primary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw_ha/startup_scs.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no |
+| project\_id | Project id where the instances will be created | `string` | n/a | yes |
+| public\_ip | Create an ephemeral public ip for the instances | `bool` | `false` | no |
+| sap\_deployment\_debug | Debug log level for deployment | `bool` | `false` | no |
+| sap\_ers\_instance\_number | ERS instance number | `string` | `"10"` | no |
+| sap\_mnt\_size | Size of /sapmnt in GB | `number` | `8` | no |
+| sap\_nw\_abap | Is this a NetWeaver ABAP installation? Set 'false' for NW Java. Dual stack is not supported by this script. | `bool` | `true` | no |
+| sap\_primary\_instance | Name of first instance (initial SCS location) | `string` | n/a | yes |
+| sap\_primary\_zone | Zone where the first instance will be created | `string` | n/a | yes |
+| sap\_scs\_instance\_number | SCS instance number | `string` | `"00"` | no |
+| sap\_secondary\_instance | Name of second instance (initial ERS location) | `string` | n/a | yes |
+| sap\_secondary\_zone | Zone where the second instance will be created | `string` | n/a | yes |
+| sap\_sid | SAP System ID | `string` | n/a | yes |
+| scs\_backend\_svc\_name | Name of SCS backend service | `string` | `""` | no |
+| scs\_forw\_rule\_name | Name of SCS forwarding rule | `string` | `""` | no |
+| scs\_hc\_name | Name of SCS health check | `string` | `""` | no |
+| scs\_hc\_port | Port of SCS health check | `string` | `""` | no |
+| scs\_inst\_group\_name | Name of SCS instance group | `string` | `""` | no |
+| scs\_vip\_address | Address of SCS virtual IP | `string` | `""` | no |
+| scs\_vip\_name | Name of SCS virtual IP | `string` | `""` | no |
+| secondary\_reservation\_name | Use a reservation specified by RESERVATION\_NAME.
By default ANY\_RESERVATION is used when this variable is empty.
In order for a reservation to be used it must be created with the
"Select specific reservation" selected (specificReservationRequired set to true)
Be sure to create your reservation with the correct Min CPU Platform for the
following instance types:
n1-highmem-32 : Intel Broadwell
n1-highmem-64 : Intel Broadwell
n1-highmem-96 : Intel Skylake
n1-megamem-96 : Intel Skylake
m1-megamem-96 : Intel Skylake
All other instance types can have automatic Min CPU Platform | `string` | `""` | no |
+| secondary\_startup\_url | DO NOT USE | `string` | `"curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw_ha/startup_ers.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"` | no |
+| service\_account | Service account that will be used as the service account on the created instance.
Leave this blank to use the project default service account | `string` | `""` | no | +| subnetwork | Subnetwork for the instances | `string` | n/a | yes | +| swap\_size | Size in GB of swap volume | `number` | `8` | no | +| usr\_sap\_size | Size of /usr/sap in GB | `number` | `8` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ers\_instance | ERS instance | +| nw\_forwarding\_rules | Forwarding rules | +| nw\_hc | Health Checks | +| nw\_hc\_firewall | Firewall rule for the Health Checks | +| nw\_instance\_groups | NW Instance Groups | +| nw\_regional\_backend\_services | Backend Services | +| nw\_vips | NW virtual IPs | +| scs\_instance | SCS instance | + + + +## Requirements + +These sections describe requirements for using this module. + +### Software + +The following dependencies must be available: + +- [Terraform][terraform] v0.13 +- [Terraform Provider for GCP][terraform-provider-gcp] plugin v4.0 + +## Contributing + +Refer to the [contribution guidelines](./CONTRIBUTING.md) for +information on contributing to this module. + +[iam-module]: https://registry.terraform.io/modules/terraform-google-modules/iam/google +[project-factory-module]: https://registry.terraform.io/modules/terraform-google-modules/project-factory/google +[terraform-provider-gcp]: https://www.terraform.io/docs/providers/google/index.html +[terraform]: https://www.terraform.io/downloads.html + +## Security Disclosures + +Please see our [security disclosure process](./SECURITY.md). diff --git a/modules/sap_nw_ha/main.tf b/modules/sap_nw_ha/main.tf new file mode 100644 index 00000000..ae7ace84 --- /dev/null +++ b/modules/sap_nw_ha/main.tf @@ -0,0 +1,425 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +# +# Terraform SAP NW HA for Google Cloud +# +# Version: 2.0.202404101403 +# Build Hash: c1f78e4d8c44de3be18fc7b3a64ccf60a94a85bc +# + +################################################################################ +# Local variables +################################################################################ +locals { + primary_region = regex("[a-z]*-[a-z1-9]*", var.sap_primary_zone) + secondary_region = regex("[a-z]*-[a-z1-9]*", var.sap_secondary_zone) + region = local.primary_region + subnetwork_split = split("/", var.subnetwork) + split_network = split("/", var.network) + is_vpc_network = length(local.split_network) > 1 + ascs = var.sap_nw_abap == true ? "A" : "" + + sid = lower(var.sap_sid) + + hc_firewall_rule_name = var.hc_firewall_rule_name == "" ? "${local.sid}-hc-allow" : var.hc_firewall_rule_name + hc_network_tag = length(var.hc_network_tag) == 0 ? [local.hc_firewall_rule_name] : var.hc_network_tag + + sap_scs_instance_number = var.sap_scs_instance_number == "" ? "00" : var.sap_scs_instance_number + scs_inst_group_name = var.scs_inst_group_name == "" ? "${local.sid}-scs-ig" : var.scs_inst_group_name + scs_hc_name = var.scs_hc_name == "" ? "${local.sid}-scs-hc" : var.scs_hc_name + scs_hc_port = var.scs_hc_port == "" ? 
"600${local.sap_scs_instance_number}" : var.scs_hc_port + scs_vip_name = var.scs_vip_name == "" ? "${local.sid}-scs-vip" : var.scs_vip_name + scs_vip_address = var.scs_vip_address == "" ? "" : var.scs_vip_address + scs_backend_svc_name = var.scs_backend_svc_name == "" ? "${local.sid}-scs-backend-svc" : var.scs_backend_svc_name + scs_forw_rule_name = var.scs_forw_rule_name == "" ? "${local.sid}-scs-fwd-rule" : var.scs_forw_rule_name + + sap_ers_instance_number = var.sap_ers_instance_number == "" ? "10" : var.sap_ers_instance_number + ers_inst_group_name = var.ers_inst_group_name == "" ? "${local.sid}-ers-ig" : var.ers_inst_group_name + ers_hc_name = var.ers_hc_name == "" ? "${local.sid}-ers-hc" : var.ers_hc_name + ers_hc_port = var.ers_hc_port == "" ? "600${local.sap_ers_instance_number}" : var.ers_hc_port + ers_vip_name = var.ers_vip_name == "" ? "${local.sid}-ers-vip" : var.ers_vip_name + ers_vip_address = var.ers_vip_address == "" ? "" : var.ers_vip_address + ers_backend_svc_name = var.ers_backend_svc_name == "" ? "${local.sid}-ers-backend-svc" : var.ers_backend_svc_name + ers_forw_rule_name = var.ers_forw_rule_name == "" ? "${local.sid}-ers-fwd-rule" : var.ers_forw_rule_name + + pacemaker_cluster_name = var.pacemaker_cluster_name == "" ? "${local.sid}-cluster" : var.pacemaker_cluster_name + subnetwork_uri = length(local.subnetwork_split) > 1 ? ( + "projects/${local.subnetwork_split[0]}/regions/${local.region}/subnetworks/${local.subnetwork_split[1]}") : ( + "projects/${var.project_id}/regions/${local.region}/subnetworks/${var.subnetwork}") + + primary_startup_url = var.sap_deployment_debug ? replace(var.primary_startup_url, "bash -s", "bash -x -s") : var.primary_startup_url + secondary_startup_url = var.sap_deployment_debug ? replace(var.secondary_startup_url, "bash -s", "bash -x -s") : var.secondary_startup_url +} + +################################################################################ +# disks +################################################################################ +resource "google_compute_disk" "nw_boot_disks" { + count = 2 + name = count.index == 0 ? "${var.sap_primary_instance}-boot" : "${var.sap_secondary_instance}-boot" + type = "pd-balanced" + zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone + size = 30 + image = "${var.linux_image_project}/${var.linux_image}" + project = var.project_id + + lifecycle { + # Ignores newer versions of the OS image. Removing this lifecycle + # and re-applying will cause the current disk to be deleted. + # All existing data will be lost. + ignore_changes = [image] + } +} + +resource "google_compute_disk" "nw_usr_sap_disks" { + count = 2 + name = count.index == 0 ? "${var.sap_primary_instance}-usrsap" : "${var.sap_secondary_instance}-usrsap" + type = "pd-balanced" + zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone + size = var.usr_sap_size + project = var.project_id +} + +resource "google_compute_disk" "nw_sapmnt_disks" { + count = 2 + name = count.index == 0 ? "${var.sap_primary_instance}-sapmnt" : "${var.sap_secondary_instance}-sapmnt" + type = "pd-balanced" + zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone + size = var.sap_mnt_size + project = var.project_id +} + +resource "google_compute_disk" "nw_swap_disks" { + count = var.swap_size > 0 ? 2 : 0 + name = count.index == 0 ? "${var.sap_primary_instance}-swap" : "${var.sap_secondary_instance}-swap" + type = "pd-balanced" + zone = count.index == 0 ? 
var.sap_primary_zone : var.sap_secondary_zone + size = var.swap_size + project = var.project_id +} + +################################################################################ +# VM VIPs +################################################################################ + +resource "google_compute_address" "sap_nw_vm_ip" { + count = 2 + name = count.index == 0 ? "${var.sap_primary_instance}-ip" : "${var.sap_secondary_instance}-ip" + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + region = local.region + project = var.project_id +} + +################################################################################ +# instances +################################################################################ +resource "google_compute_instance" "scs_instance" { + name = var.sap_primary_instance + machine_type = var.machine_type + zone = var.sap_primary_zone + project = var.project_id + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.nw_boot_disks[0].self_link + } + + attached_disk { + device_name = google_compute_disk.nw_usr_sap_disks[0].name + source = google_compute_disk.nw_usr_sap_disks[0].self_link + } + attached_disk { + device_name = google_compute_disk.nw_sapmnt_disks[0].name + source = google_compute_disk.nw_sapmnt_disks[0].self_link + } + dynamic "attached_disk" { + for_each = var.swap_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.nw_swap_disks[0].name + source = google_compute_disk.nw_swap_disks[0].self_link + } + } + + can_ip_forward = var.can_ip_forward + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_nw_vm_ip[0].address + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + tags = flatten([var.network_tags, local.hc_network_tag]) + service_account { + # An empty string service account will default to the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + dynamic "reservation_affinity" { + for_each = length(var.primary_reservation_name) > 1 ? [1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.primary_reservation_name] + } + } + } + metadata = { + startup-script = local.primary_startup_url + + # SCS settings + sap_primary_instance = var.sap_primary_instance + sap_primary_zone = var.sap_primary_zone + scs_hc_port = local.scs_hc_port + scs_vip_address = google_compute_address.nw_vips[0].address + scs_vip_name = local.scs_vip_name + + # ERS settings + sap_secondary_instance = var.sap_secondary_instance + sap_secondary_zone = var.sap_secondary_zone + ers_hc_port = local.ers_hc_port + ers_vip_address = google_compute_address.nw_vips[1].address + ers_vip_name = local.ers_vip_name + + # File system settings + nfs_path = var.nfs_path + + # SAP system settings + sap_sid = upper(var.sap_sid) + sap_scs_instance_number = local.sap_scs_instance_number + sap_ers_instance_number = local.sap_ers_instance_number + sap_ascs = local.ascs + + # Pacemaker settings + pacemaker_cluster_name = local.pacemaker_cluster_name + + # Other + sap_deployment_debug = var.sap_deployment_debug ? 
"True" : "False" + post_deployment_script = var.post_deployment_script + template-type = "TERRAFORM" + } + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} + +resource "google_compute_instance" "ers_instance" { + name = var.sap_secondary_instance + machine_type = var.machine_type + zone = var.sap_secondary_zone + project = var.project_id + + boot_disk { + auto_delete = true + device_name = "boot" + source = google_compute_disk.nw_boot_disks[1].self_link + } + + attached_disk { + device_name = google_compute_disk.nw_usr_sap_disks[1].name + source = google_compute_disk.nw_usr_sap_disks[1].self_link + } + attached_disk { + device_name = google_compute_disk.nw_sapmnt_disks[1].name + source = google_compute_disk.nw_sapmnt_disks[1].self_link + } + dynamic "attached_disk" { + for_each = var.swap_size > 0 ? [1] : [] + content { + device_name = google_compute_disk.nw_swap_disks[1].name + source = google_compute_disk.nw_swap_disks[1].self_link + } + } + + can_ip_forward = var.can_ip_forward + network_interface { + subnetwork = local.subnetwork_uri + network_ip = google_compute_address.sap_nw_vm_ip[1].address + # we only include access_config if public_ip is true, an empty access_config + # will create an ephemeral public ip + dynamic "access_config" { + for_each = var.public_ip ? [1] : [] + content { + } + } + } + tags = flatten([var.network_tags, local.hc_network_tag]) + service_account { + # An empty string service account will default to the projects default compute engine service account + email = var.service_account + scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + dynamic "reservation_affinity" { + for_each = length(var.secondary_reservation_name) > 1 ? [1] : [] + content { + type = "SPECIFIC_RESERVATION" + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = [var.secondary_reservation_name] + } + } + } + metadata = { + startup-script = local.secondary_startup_url + + # SCS settings + sap_primary_instance = var.sap_primary_instance + sap_primary_zone = var.sap_primary_zone + scs_hc_port = local.scs_hc_port + scs_vip_address = google_compute_address.nw_vips[0].address + scs_vip_name = local.scs_vip_name + + # ERS settings + sap_secondary_instance = var.sap_secondary_instance + sap_secondary_zone = var.sap_secondary_zone + ers_hc_port = local.ers_hc_port + ers_vip_address = google_compute_address.nw_vips[1].address + ers_vip_name = local.ers_vip_name + + # File system settings + nfs_path = var.nfs_path + + # SAP system settings + sap_sid = upper(var.sap_sid) + sap_scs_instance_number = local.sap_scs_instance_number + sap_ers_instance_number = local.sap_ers_instance_number + sap_ascs = local.ascs + + # Pacemaker settings + pacemaker_cluster_name = local.pacemaker_cluster_name + + # Other + sap_deployment_debug = var.sap_deployment_debug ? "True" : "False" + post_deployment_script = var.post_deployment_script + template-type = "TERRAFORM" + } + + lifecycle { + # Ignore changes in the instance metadata, since it is modified by the SAP startup script. + ignore_changes = [metadata] + } +} +################################################################################ +# NW VIPs +################################################################################ +resource "google_compute_address" "nw_vips" { + count = 2 + name = count.index == 0 ? 
local.scs_vip_name : local.ers_vip_name + subnetwork = local.subnetwork_uri + address_type = "INTERNAL" + address = count.index == 0 ? local.scs_vip_address : local.ers_vip_address + region = count.index == 0 ? local.primary_region : local.secondary_region + project = var.project_id +} + +################################################################################ +# IGs +################################################################################ +resource "google_compute_instance_group" "nw_instance_groups" { + count = 2 + name = count.index == 0 ? local.scs_inst_group_name : local.ers_inst_group_name + instances = count.index == 0 ? google_compute_instance.scs_instance[*].self_link : google_compute_instance.ers_instance[*].self_link + zone = count.index == 0 ? var.sap_primary_zone : var.sap_secondary_zone + project = var.project_id +} + +################################################################################ +# Health Checks +################################################################################ +resource "google_compute_health_check" "nw_hc" { + count = 2 + name = count.index == 0 ? local.scs_hc_name : local.ers_hc_name + timeout_sec = 10 + check_interval_sec = 10 + healthy_threshold = 2 + unhealthy_threshold = 2 + project = var.project_id + + tcp_health_check { + port = count.index == 0 ? local.scs_hc_port : local.ers_hc_port + } +} + +################################################################################ +# Firewall rule for the Health Checks +################################################################################ +resource "google_compute_firewall" "nw_hc_firewall_rule" { + name = local.hc_firewall_rule_name + count = local.is_vpc_network ? 0 : 1 + network = var.network + direction = "INGRESS" + source_ranges = ["35.191.0.0/16", "130.211.0.0/22"] + target_tags = local.hc_network_tag + project = var.project_id + + allow { + protocol = "tcp" + ports = [local.scs_hc_port, local.ers_hc_port] + } +} + +################################################################################ +# Backend services +################################################################################ +resource "google_compute_region_backend_service" "nw_regional_backend_services" { + count = 2 + name = count.index == 0 ? local.scs_backend_svc_name : local.ers_backend_svc_name + region = local.region + load_balancing_scheme = "INTERNAL" + health_checks = [element(google_compute_health_check.nw_hc[*].id, count.index)] + project = var.project_id + + failover_policy { + disable_connection_drain_on_failover = true + drop_traffic_if_unhealthy = true + failover_ratio = 1 + } + backend { + group = element(google_compute_instance_group.nw_instance_groups[*].id, count.index) + failover = false + } + backend { + group = element(google_compute_instance_group.nw_instance_groups[*].id, 1 - count.index) + failover = true + } +} + +################################################################################ +# Forwarding Rules +################################################################################ +resource "google_compute_forwarding_rule" "nw_forwarding_rules" { + count = 2 + name = count.index == 0 ? 
local.scs_forw_rule_name : local.ers_forw_rule_name + ip_address = element(google_compute_address.nw_vips[*].address, count.index) + region = local.region + load_balancing_scheme = "INTERNAL" + backend_service = element(google_compute_region_backend_service.nw_regional_backend_services[*].id, count.index) + all_ports = true + subnetwork = local.subnetwork_uri + project = var.project_id +} diff --git a/modules/sap_nw_ha/outputs.tf b/modules/sap_nw_ha/outputs.tf new file mode 100644 index 00000000..2685aa73 --- /dev/null +++ b/modules/sap_nw_ha/outputs.tf @@ -0,0 +1,47 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +output "scs_instance" { + description = "SCS instance" + value = google_compute_instance.scs_instance.self_link +} +output "ers_instance" { + description = "ERS instance" + value = google_compute_instance.ers_instance.self_link +} +output "nw_vips" { + description = "NW virtual IPs" + value = google_compute_address.nw_vips[*].self_link +} +output "nw_instance_groups" { + description = "NW Instance Groups" + value = google_compute_instance_group.nw_instance_groups[*].self_link +} +output "nw_hc" { + description = "Health Checks" + value = google_compute_health_check.nw_hc[*].self_link +} +output "nw_hc_firewall" { + description = "Firewall rule for the Health Checks" + value = google_compute_firewall.nw_hc_firewall_rule[*].self_link +} +output "nw_regional_backend_services" { + description = "Backend Services" + value = google_compute_region_backend_service.nw_regional_backend_services[*].self_link +} +output "nw_forwarding_rules" { + description = "Forwarding rules" + value = google_compute_forwarding_rule.nw_forwarding_rules[*].self_link +} diff --git a/modules/sap_nw_ha/variables.tf b/modules/sap_nw_ha/variables.tf new file mode 100644 index 00000000..49680157 --- /dev/null +++ b/modules/sap_nw_ha/variables.tf @@ -0,0 +1,352 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+##############################################################################
+## MANDATORY SETTINGS
+##############################################################################
+#
+# General settings
+#
+variable "project_id" {
+  type        = string
+  description = "Project id where the instances will be created"
+}
+variable "machine_type" {
+  type        = string
+  description = "Machine type for the instances"
+}
+variable "network" {
+  type        = string
+  description = "Network for the instances"
+}
+variable "subnetwork" {
+  type        = string
+  description = "Subnetwork for the instances"
+}
+variable "linux_image" {
+  type        = string
+  description = "Linux image name"
+}
+variable "linux_image_project" {
+  type        = string
+  description = "Linux image project"
+}
+#
+# SCS settings
+#
+variable "sap_primary_instance" {
+  type        = string
+  description = "Name of the first instance (initial SCS location)"
+  validation {
+    condition     = can(regex("^[a-z0-9\\-]+$", var.sap_primary_instance)) && length(var.sap_primary_instance) <= 13
+    error_message = "The sap_primary_instance must consist only of lowercase letters (a-z), digits, and hyphens, and must be at most 13 characters long."
+  }
+}
+variable "sap_primary_zone" {
+  type        = string
+  description = "Zone where the first instance will be created"
+}
+#
+# ERS settings
+#
+variable "sap_secondary_instance" {
+  type        = string
+  description = "Name of the second instance (initial ERS location)"
+  validation {
+    condition     = can(regex("^[a-z0-9\\-]+$", var.sap_secondary_instance)) && length(var.sap_secondary_instance) <= 13
+    error_message = "The sap_secondary_instance must consist only of lowercase letters (a-z), digits, and hyphens, and must be at most 13 characters long."
+  }
+}
+variable "sap_secondary_zone" {
+  type        = string
+  description = "Zone where the second instance will be created"
+}
+#
+# File system settings
+#
+variable "nfs_path" {
+  type        = string
+  description = "NFS path for the shared file system, e.g. 10.163.58.114:/ssd"
+  validation {
+    condition     = var.nfs_path == "" || can(regex("(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}:\\/[^[:space:]]*", var.nfs_path))
+    error_message = "The nfs_path must be an IP address followed by ':/' and an export path, e.g. 10.163.58.114:/ssd."
+  }
+}
+#
+# SAP system settings
+#
+variable "sap_sid" {
+  type        = string
+  description = "SAP System ID"
+  validation {
+    condition     = length(var.sap_sid) == 3 && can(regex("[A-Z][A-Z0-9]{2}", var.sap_sid))
+    error_message = "The sap_sid must be exactly 3 characters long, start with an uppercase letter, and contain only uppercase letters and digits."
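+    # Note: the regex above is unanchored, but combined with the exact
+    # length check of 3 it can only match the entire string.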
+  }
+}
+
+##############################################################################
+## OPTIONAL SETTINGS (default values will be determined/calculated)
+##############################################################################
+#
+# General settings
+#
+variable "hc_network_tag" {
+  type        = list(string)
+  default     = []
+  description = "Network tag for the health check firewall rule"
+}
+variable "hc_firewall_rule_name" {
+  type        = string
+  default     = ""
+  description = "Name of the firewall rule for the health check"
+}
+#
+# SCS settings
+#
+variable "scs_inst_group_name" {
+  type        = string
+  default     = ""
+  description = "Name of the SCS instance group"
+}
+variable "scs_hc_name" {
+  type        = string
+  default     = ""
+  description = "Name of the SCS health check"
+}
+variable "scs_hc_port" {
+  type        = string
+  default     = ""
+  description = "Port of the SCS health check"
+}
+variable "scs_vip_name" {
+  type        = string
+  default     = ""
+  description = "Name of the SCS virtual IP"
+}
+variable "scs_vip_address" {
+  type        = string
+  default     = ""
+  description = "Address of the SCS virtual IP"
+}
+variable "scs_backend_svc_name" {
+  type        = string
+  default     = ""
+  description = "Name of the SCS backend service"
+}
+variable "scs_forw_rule_name" {
+  type        = string
+  default     = ""
+  description = "Name of the SCS forwarding rule"
+}
+#
+# ERS settings
+#
+variable "ers_inst_group_name" {
+  type        = string
+  default     = ""
+  description = "Name of the ERS instance group"
+}
+variable "ers_hc_name" {
+  type        = string
+  default     = ""
+  description = "Name of the ERS health check"
+}
+variable "ers_hc_port" {
+  type        = string
+  default     = ""
+  description = "Port of the ERS health check"
+}
+variable "ers_vip_name" {
+  type        = string
+  default     = ""
+  description = "Name of the ERS virtual IP"
+}
+variable "ers_vip_address" {
+  type        = string
+  default     = ""
+  description = "Address of the ERS virtual IP"
+}
+variable "ers_backend_svc_name" {
+  type        = string
+  default     = ""
+  description = "Name of the ERS backend service"
+}
+variable "ers_forw_rule_name" {
+  type        = string
+  default     = ""
+  description = "Name of the ERS forwarding rule"
+}
+#
+# File system settings
+#
+variable "usr_sap_size" {
+  type        = number
+  default     = 8
+  description = "Size of /usr/sap in GB"
+  validation {
+    condition     = var.usr_sap_size >= 8
+    error_message = "Size of /usr/sap must be at least 8 GB."
+  }
+}
+variable "sap_mnt_size" {
+  type        = number
+  default     = 8
+  description = "Size of /sapmnt in GB"
+  validation {
+    condition     = var.sap_mnt_size >= 8
+    error_message = "Size of /sapmnt must be at least 8 GB."
+  }
+}
+variable "swap_size" {
+  type        = number
+  default     = 8
+  description = "Size of the swap volume in GB"
+  validation {
+    condition     = var.swap_size >= 8
+    error_message = "Size of swap must be at least 8 GB."
+  }
+}
+#
+# SAP system settings
+#
+variable "sap_scs_instance_number" {
+  type        = string
+  default     = "00"
+  description = "SCS instance number"
+  validation {
+    condition     = length(var.sap_scs_instance_number) == 2 && tonumber(var.sap_scs_instance_number) >= 0
+    error_message = "The sap_scs_instance_number must be exactly 2 digits. For a single digit x, enter it as 0x."
+  }
+}
+variable "sap_ers_instance_number" {
+  type        = string
+  default     = "10"
+  description = "ERS instance number"
+  validation {
+    condition     = length(var.sap_ers_instance_number) == 2 && tonumber(var.sap_ers_instance_number) >= 0
+    error_message = "The sap_ers_instance_number must be exactly 2 digits. For a single digit x, enter it as 0x."
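+    # The two-digit instance number also drives the default health-check
+    # port: when ers_hc_port is left empty, main.tf derives it as "600<NN>".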
+  }
+}
+variable "sap_nw_abap" {
+  type        = bool
+  default     = true
+  description = "Whether this is a NetWeaver ABAP installation. Set to 'false' for NW Java. Dual-stack systems are not supported."
+}
+#
+# Pacemaker settings
+#
+variable "pacemaker_cluster_name" {
+  type        = string
+  default     = ""
+  description = "Name of the Pacemaker cluster."
+}
+#
+# Optional Settings
+#
+variable "public_ip" {
+  type        = bool
+  default     = false
+  description = "Create an ephemeral public IP for the instances"
+}
+variable "service_account" {
+  type    = string
+  default = ""
+  description = <<-EOT
+    Service account that will be used as the service account on the created instances.
+    Leave this blank to use the project's default Compute Engine service account.
+  EOT
}
+variable "network_tags" {
+  type        = list(string)
+  default     = []
+  description = "Network tags to apply to the instances"
+}
+variable "sap_deployment_debug" {
+  type        = bool
+  default     = false
+  description = "Debug log level for deployment"
+}
+variable "primary_reservation_name" {
+  type = string
+  description = <<-EOT
+    Use a reservation specified by RESERVATION_NAME.
+    By default ANY_RESERVATION is used when this variable is empty.
+    For a reservation to be used, it must be created with the
+    "Select specific reservation" option selected (specificReservationRequired set to true).
+    Be sure to create your reservation with the correct Min CPU Platform for the
+    following instance types:
+    n1-highmem-32 : Intel Broadwell
+    n1-highmem-64 : Intel Broadwell
+    n1-highmem-96 : Intel Skylake
+    n1-megamem-96 : Intel Skylake
+    m1-megamem-96 : Intel Skylake
+    All other instance types can use an automatic Min CPU Platform.
+  EOT
+  default = ""
+}
+
+variable "secondary_reservation_name" {
+  type = string
+  description = <<-EOT
+    Use a reservation specified by RESERVATION_NAME.
+    By default ANY_RESERVATION is used when this variable is empty.
+    For a reservation to be used, it must be created with the
+    "Select specific reservation" option selected (specificReservationRequired set to true).
+    Be sure to create your reservation with the correct Min CPU Platform for the
+    following instance types:
+    n1-highmem-32 : Intel Broadwell
+    n1-highmem-64 : Intel Broadwell
+    n1-highmem-96 : Intel Skylake
+    n1-megamem-96 : Intel Skylake
+    m1-megamem-96 : Intel Skylake
+    All other instance types can use an automatic Min CPU Platform.
+  EOT
+  default = ""
+}
+#
+# DO NOT MODIFY unless you know what you are doing
+#
+variable "primary_startup_url" {
+  type        = string
+  default     = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw_ha/startup_scs.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"
+  description = "DO NOT USE"
+}
+variable "secondary_startup_url" {
+  type        = string
+  default     = "curl -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform/sap_nw_ha/startup_ers.sh | bash -s https://www.googleapis.com/storage/v1/core-connect-dm-templates/202404101403/terraform"
+  description = "DO NOT USE"
+}
+variable "post_deployment_script" {
+  type    = string
+  default = ""
+  description = <<-EOT
+    Specifies the location of a script to run after the deployment is complete.
+    The script should be hosted on a web server or in a GCS bucket. The URL should
+    begin with http://, https://, or gs://. Note that this script will be executed
+    on all VMs that the template creates. If you only want to run it on the master
+    instance, you will need to add a check at the top of your script.
+  EOT
+}
+
+#
+# DO NOT MODIFY unless you know what you are doing
+#
+variable "can_ip_forward" {
+  type        = bool
+  description = "Whether sending and receiving of packets with non-matching source or destination IPs is allowed."
+  default     = true
+}
diff --git a/modules/sap_nw_ha/versions.tf b/modules/sap_nw_ha/versions.tf
new file mode 100644
index 00000000..fb459560
--- /dev/null
+++ b/modules/sap_nw_ha/versions.tf
@@ -0,0 +1,24 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+terraform {
+  required_version = ">=0.12.6"
+  required_providers {
+    google = {
+      source  = "hashicorp/google"
+      version = ">= 4.0.0, < 6"
+    }
+  }
+}
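
Usage sketch: every optional name and port variable above defaults to a value derived from the SAP SID and the instance numbers in the locals block, so callers only set them to override the generated values. A minimal sketch, assuming the registry source path for this module; the project id, zones, NFS export, SID, and override values below are illustrative placeholders, not values from this change:

    module "sap_nw_ha" {
      source  = "terraform-google-modules/sap/google//modules/sap_nw_ha"
      version = "~> 1.0"

      # Mandatory settings (illustrative values)
      project_id          = "my-sap-project"
      machine_type        = "n1-highmem-32"
      network             = "default"
      subnetwork          = "default"
      linux_image         = "sles-15-sp2-sap"
      linux_image_project = "suse-sap-cloud"

      sap_primary_instance   = "qas-nw1"
      sap_primary_zone       = "europe-west4-b"
      sap_secondary_instance = "qas-nw2"
      sap_secondary_zone     = "europe-west4-c"

      nfs_path = "10.0.0.5:/export" # illustrative NFS export
      sap_sid  = "QA1"

      # Optional overrides; anything left unset falls back to the
      # "<sid>-..." names and "600<NN>" ports computed in locals.
      sap_scs_instance_number = "01" # default SCS health-check port becomes 60001
      pacemaker_cluster_name  = "qa1-ha-cluster"
    }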