From d8beb9b9ec22ae7186fd621570240539965f1c47 Mon Sep 17 00:00:00 2001
From: ImpSy <3097030+ImpSy@users.noreply.github.com>
Date: Fri, 31 Mar 2023 15:43:46 +0200
Subject: [PATCH] remove ofas_managed_load_balancer variable
---
README.md | 7 +++++--
examples/from-scratch-with-private-link/main.tf | 2 +-
main.tf | 2 +-
variables.tf | 6 ------
4 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/README.md b/README.md
index 2e2b4c0..9126549 100644
--- a/README.md
+++ b/README.md
@@ -125,7 +125,7 @@ Folder [`examples/import-ocean-cluster/`](https://github.com/spotinst/terraform-
### v2 migration guide
-By default the Ocean Spark deployer jobs now run in the kube-system namespace.
+#### By default the Ocean Spark deployer jobs now run in the kube-system namespace.
To avoid issues for existing clusters you will need to set the following line:
```diff
@@ -137,6 +137,10 @@ module "ocean-spark" {
}
```
+#### Deprecated `ofas_managed_load_balancer` variable has been deleted
+
+Use `ingress_managed_load_balancer` instead
+
### v1 migration guide
This migration revolves around 1 topic:
@@ -206,7 +210,6 @@ No modules.
| [ingress\_private\_link\_endpoint\_service\_address](#input\_ingress\_private\_link\_endpoint\_service\_address) | The name of the VPC Endpoint Service the Ocean for Apache Spark control plane should bind to when privatelink is enabled | `string` | `null` | no |
| [log\_collection\_collect\_driver\_logs](#input\_log\_collection\_collect\_driver\_logs) | Controls whether the Ocean Spark cluster will collect Spark driver logs | `bool` | `true` | no |
| [ocean\_cluster\_id](#input\_ocean\_cluster\_id) | Specifies the Ocean cluster identifier | `string` | n/a | yes |
-| [ofas\_managed\_load\_balancer](#input\_ofas\_managed\_load\_balancer) | Controls whether a load balancer managed by Ocean for Apache Spark will be provisioned for the cluster (deprecated: use ingress\_managed\_load\_balancer instead) | `bool` | `null` | no |
| [spark\_additional\_app\_namespaces](#input\_spark\_additional\_app\_namespaces) | List of Kubernetes namespaces that should be configured to run Spark applications, in addition to the default 'spark-apps' namespace | `list(string)` | `[]` | no |
| [webhook\_host\_network\_ports](#input\_webhook\_host\_network\_ports) | Assign a list of ports on the host networks for our system pods | `list(number)` | `[]` | no |
| [webhook\_use\_host\_network](#input\_webhook\_use\_host\_network) | Controls whether Ocean Spark system pods that expose webhooks will use the host network | `bool` | `false` | no |
diff --git a/examples/from-scratch-with-private-link/main.tf b/examples/from-scratch-with-private-link/main.tf
index 6fb2de8..9883bcc 100644
--- a/examples/from-scratch-with-private-link/main.tf
+++ b/examples/from-scratch-with-private-link/main.tf
@@ -305,7 +305,7 @@ module "ocean-spark" {
ocean_cluster_id = module.ocean-aws-k8s.ocean_id
- ofas_managed_load_balancer = false
+ ingress_managed_load_balancer = false
ingress_load_balancer_target_group_arn = aws_lb_target_group.this.arn
diff --git a/main.tf b/main.tf
index fb57219..511e38d 100644
--- a/main.tf
+++ b/main.tf
@@ -54,7 +54,7 @@ resource "spotinst_ocean_spark" "cluster" {
managed = var.ingress_managed_controller
}
load_balancer {
- managed = try(var.ofas_managed_load_balancer, var.ingress_managed_load_balancer)
+ managed = var.ingress_managed_load_balancer
target_group_arn = var.ingress_load_balancer_target_group_arn
service_annotations = var.ingress_load_balancer_service_annotations
}
diff --git a/variables.tf b/variables.tf
index d813619..d5e3aa2 100644
--- a/variables.tf
+++ b/variables.tf
@@ -27,12 +27,6 @@ variable "ingress_managed_controller" {
default = true
}
-variable "ofas_managed_load_balancer" {
- type = bool
- description = "Controls whether a load balancer managed by Ocean for Apache Spark will be provisioned for the cluster (deprecated: use ingress_managed_load_balancer instead)"
- default = null
-}
-
variable "ingress_managed_load_balancer" {
type = bool
description = "Controls whether a load balancer managed by Ocean for Apache Spark will be provisioned for the cluster"