diff --git a/.github/workflows/validate-codeowners.yml b/.github/workflows/validate-codeowners.yml
index 70f829e3..4b4a2264 100644
--- a/.github/workflows/validate-codeowners.yml
+++ b/.github/workflows/validate-codeowners.yml
@@ -10,6 +10,7 @@ jobs:
steps:
- name: "Checkout source code at current commit"
uses: actions/checkout@v2
+ # Leave pinned at 0.7.1 until https://github.com/mszostok/codeowners-validator/issues/173 is resolved
- uses: mszostok/codeowners-validator@v0.7.1
if: github.event.pull_request.head.repo.full_name == github.repository
name: "Full check of CODEOWNERS"
diff --git a/README.md b/README.md
index 25855c9f..ea29d026 100644
--- a/README.md
+++ b/README.md
@@ -197,8 +197,7 @@ For automated tests of the complete example using [bats](https://github.com/bats
Other examples:
-- [terraform-root-modules/eks](https://github.com/cloudposse/terraform-root-modules/tree/master/aws/eks) - Cloud Posse's service catalog of "root module" invocations for provisioning reference architectures
-- [terraform-root-modules/eks-backing-services-peering](https://github.com/cloudposse/terraform-root-modules/tree/master/aws/eks-backing-services-peering) - example of VPC peering between the EKS VPC and backing services VPC
+- [terraform-aws-components/eks/cluster](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eks/cluster) - Cloud Posse's service catalog of "root module" invocations for provisioning reference architectures
```hcl
provider "aws" {
@@ -316,7 +315,7 @@ Module usage with two unmanaged worker groups:
cluster_name = module.label.id
cluster_endpoint = module.eks_cluster.eks_cluster_endpoint
cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data
- cluster_security_group_id = module.eks_cluster.security_group_id
+ cluster_security_group_id = module.eks_cluster.eks_cluster_managed_security_group_id
# Auto-scaling policies and CloudWatch metric alarms
autoscaling_policies_enabled = var.autoscaling_policies_enabled
@@ -343,7 +342,7 @@ Module usage with two unmanaged worker groups:
cluster_name = module.label.id
cluster_endpoint = module.eks_cluster.eks_cluster_endpoint
cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data
- cluster_security_group_id = module.eks_cluster.security_group_id
+ cluster_security_group_id = module.eks_cluster.eks_cluster_managed_security_group_id
# Auto-scaling policies and CloudWatch metric alarms
autoscaling_policies_enabled = var.autoscaling_policies_enabled
@@ -393,7 +392,7 @@ Available targets:
| Name | Version |
|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.14.0 |
+| [terraform](#requirement\_terraform) | >= 1.0.0 |
| [aws](#requirement\_aws) | >= 3.38 |
| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 |
| [null](#requirement\_null) | >= 2.0 |
@@ -515,7 +514,7 @@ Available targets:
| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no |
| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no |
| [vpc\_id](#input\_vpc\_id) | VPC ID for the EKS cluster | `string` | n/a | yes |
-| [wait\_for\_cluster\_command](#input\_wait\_for\_cluster\_command) | `local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint URL is available as environment variable `ENDPOINT` | `string` | `"curl --silent --fail --retry 30 --retry-delay 10 --retry-connrefused --max-time 11 --insecure --output /dev/null $ENDPOINT/healthz"` | no |
+| [wait\_for\_cluster\_command](#input\_wait\_for\_cluster\_command) | `local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint URL is available as environment variable `ENDPOINT` | `string` | `"if test -n \"$ENDPOINT\"; then curl --silent --fail --retry 30 --retry-delay 10 --retry-connrefused --max-time 11 --insecure --output /dev/null $ENDPOINT/healthz; fi"` | no |
| [workers\_role\_arns](#input\_workers\_role\_arns) | List of Role ARNs of the worker nodes | `list(string)` | `[]` | no |
| [workers\_security\_group\_ids](#input\_workers\_security\_group\_ids) | DEPRECATED: Use `allowed_security_group_ids` instead.
Historical description: Security Group IDs of the worker nodes.
Historical default: `[]` | `list(string)` | `[]` | no |
@@ -535,13 +534,13 @@ Available targets:
| [eks\_cluster\_id](#output\_eks\_cluster\_id) | The name of the cluster |
| [eks\_cluster\_identity\_oidc\_issuer](#output\_eks\_cluster\_identity\_oidc\_issuer) | The OIDC Identity issuer for the cluster |
| [eks\_cluster\_identity\_oidc\_issuer\_arn](#output\_eks\_cluster\_identity\_oidc\_issuer\_arn) | The OIDC Identity issuer ARN for the cluster that can be used to associate IAM roles with a service account |
-| [eks\_cluster\_managed\_security\_group\_id](#output\_eks\_cluster\_managed\_security\_group\_id) | Security Group ID that was created by EKS for the cluster. EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads |
+| [eks\_cluster\_managed\_security\_group\_id](#output\_eks\_cluster\_managed\_security\_group\_id) | Security Group ID that was created by EKS for the cluster.
EKS creates a Security Group and applies it to the ENIs that are attached to EKS Control Plane master nodes and to any managed workloads. |
| [eks\_cluster\_role\_arn](#output\_eks\_cluster\_role\_arn) | ARN of the EKS cluster IAM role |
| [eks\_cluster\_version](#output\_eks\_cluster\_version) | The Kubernetes server version of the cluster |
| [kubernetes\_config\_map\_id](#output\_kubernetes\_config\_map\_id) | ID of `aws-auth` Kubernetes ConfigMap |
-| [security\_group\_arn](#output\_security\_group\_arn) | ARN of the created Security Group for the EKS cluster |
-| [security\_group\_id](#output\_security\_group\_id) | ID of the created Security Group for the EKS cluster |
-| [security\_group\_name](#output\_security\_group\_name) | Name of the created Security Group for the EKS cluster |
+| [security\_group\_arn](#output\_security\_group\_arn) | (Deprecated) ARN of the optionally created additional Security Group for the EKS cluster |
+| [security\_group\_id](#output\_security\_group\_id) | (Deprecated) ID of the optionally created additional Security Group for the EKS cluster |
+| [security\_group\_name](#output\_security\_group\_name) | Name of the optionally created additional Security Group for the EKS cluster |
diff --git a/README.yaml b/README.yaml
index 935786f6..1ed8d6e4 100644
--- a/README.yaml
+++ b/README.yaml
@@ -157,8 +157,7 @@ usage: |2-
Other examples:
- - [terraform-root-modules/eks](https://github.com/cloudposse/terraform-root-modules/tree/master/aws/eks) - Cloud Posse's service catalog of "root module" invocations for provisioning reference architectures
- - [terraform-root-modules/eks-backing-services-peering](https://github.com/cloudposse/terraform-root-modules/tree/master/aws/eks-backing-services-peering) - example of VPC peering between the EKS VPC and backing services VPC
+ - [terraform-aws-components/eks/cluster](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eks/cluster) - Cloud Posse's service catalog of "root module" invocations for provisioning reference architectures
```hcl
provider "aws" {
@@ -276,7 +275,7 @@ usage: |2-
cluster_name = module.label.id
cluster_endpoint = module.eks_cluster.eks_cluster_endpoint
cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data
- cluster_security_group_id = module.eks_cluster.security_group_id
+ cluster_security_group_id = module.eks_cluster.eks_cluster_managed_security_group_id
# Auto-scaling policies and CloudWatch metric alarms
autoscaling_policies_enabled = var.autoscaling_policies_enabled
@@ -303,7 +302,7 @@ usage: |2-
cluster_name = module.label.id
cluster_endpoint = module.eks_cluster.eks_cluster_endpoint
cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data
- cluster_security_group_id = module.eks_cluster.security_group_id
+ cluster_security_group_id = module.eks_cluster.eks_cluster_managed_security_group_id
# Auto-scaling policies and CloudWatch metric alarms
autoscaling_policies_enabled = var.autoscaling_policies_enabled
diff --git a/auth.tf b/auth.tf
index e99a8093..59a109d0 100644
--- a/auth.tf
+++ b/auth.tf
@@ -38,10 +38,10 @@ locals {
exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? ["--profile", var.kube_exec_auth_aws_profile] : []
exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? ["--role-arn", var.kube_exec_auth_role_arn] : []
- cluster_endpoint_data = join("", aws_eks_cluster.default.*.endpoint)
+ cluster_endpoint_data = join("", aws_eks_cluster.default[*].endpoint) # use `join` instead of `one` to keep the value a string
cluster_auth_map_endpoint = var.apply_config_map_aws_auth ? local.cluster_endpoint_data : var.dummy_kubeapi_server
- certificate_authority_data_list = coalescelist(aws_eks_cluster.default.*.certificate_authority, [[{ data : "" }]])
+ certificate_authority_data_list = coalescelist(aws_eks_cluster.default[*].certificate_authority, [[{ data : "" }]])
certificate_authority_data_list_internal = local.certificate_authority_data_list[0]
certificate_authority_data_map = local.certificate_authority_data_list_internal[0]
certificate_authority_data = local.certificate_authority_data_map["data"]
@@ -50,9 +50,9 @@ locals {
# Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically
map_worker_roles = [
for role_arn in var.workers_role_arns : {
- rolearn : role_arn
- username : "system:node:{{EC2PrivateDNSName}}"
- groups : [
+ rolearn = role_arn
+ username = "system:node:{{EC2PrivateDNSName}}"
+ groups = [
"system:bootstrappers",
"system:nodes"
]
@@ -62,13 +62,13 @@ locals {
resource "null_resource" "wait_for_cluster" {
count = local.enabled && var.apply_config_map_aws_auth ? 1 : 0
- depends_on = [aws_eks_cluster.default[0]]
+ depends_on = [aws_eks_cluster.default]
provisioner "local-exec" {
command = var.wait_for_cluster_command
interpreter = var.local_exec_interpreter
environment = {
- ENDPOINT = aws_eks_cluster.default[0].endpoint
+ ENDPOINT = local.cluster_endpoint_data
}
}
}
@@ -84,7 +84,7 @@ resource "null_resource" "wait_for_cluster" {
#
data "aws_eks_cluster_auth" "eks" {
count = local.kube_data_auth_enabled ? 1 : 0
- name = join("", aws_eks_cluster.default.*.id)
+ name = one(aws_eks_cluster.default[*].id)
}
@@ -99,25 +99,25 @@ provider "kubernetes" {
# If this solution bothers you, you can disable it by setting var.dummy_kubeapi_server = null
host = local.cluster_auth_map_endpoint
cluster_ca_certificate = local.enabled && !local.kubeconfig_path_enabled ? base64decode(local.certificate_authority_data) : null
- token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null
+ token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null
# The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster
# in KUBECONFIG is some other cluster, this will cause problems, so we override it always.
config_path = local.kubeconfig_path_enabled ? var.kubeconfig_path : ""
config_context = var.kubeconfig_context
dynamic "exec" {
- for_each = local.kube_exec_auth_enabled ? ["exec"] : []
+ for_each = local.kube_exec_auth_enabled && length(local.cluster_endpoint_data) > 0 ? ["exec"] : []
content {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
- args = concat(local.exec_profile, ["eks", "get-token", "--cluster-name", aws_eks_cluster.default[0].id], local.exec_role)
+ args = concat(local.exec_profile, ["eks", "get-token", "--cluster-name", try(aws_eks_cluster.default[0].id, "deleted")], local.exec_role)
}
}
}
resource "kubernetes_config_map" "aws_auth_ignore_changes" {
count = local.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes ? 1 : 0
- depends_on = [null_resource.wait_for_cluster[0]]
+ depends_on = [null_resource.wait_for_cluster]
metadata {
name = "aws-auth"
@@ -137,7 +137,7 @@ resource "kubernetes_config_map" "aws_auth_ignore_changes" {
resource "kubernetes_config_map" "aws_auth" {
count = local.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes == false ? 1 : 0
- depends_on = [null_resource.wait_for_cluster[0]]
+ depends_on = [null_resource.wait_for_cluster]
metadata {
name = "aws-auth"
diff --git a/docs/terraform.md b/docs/terraform.md
index c980a833..bb571593 100644
--- a/docs/terraform.md
+++ b/docs/terraform.md
@@ -3,7 +3,7 @@
| Name | Version |
|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.14.0 |
+| [terraform](#requirement\_terraform) | >= 1.0.0 |
| [aws](#requirement\_aws) | >= 3.38 |
| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 |
| [null](#requirement\_null) | >= 2.0 |
@@ -125,7 +125,7 @@
| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no |
| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no |
| [vpc\_id](#input\_vpc\_id) | VPC ID for the EKS cluster | `string` | n/a | yes |
-| [wait\_for\_cluster\_command](#input\_wait\_for\_cluster\_command) | `local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint URL is available as environment variable `ENDPOINT` | `string` | `"curl --silent --fail --retry 30 --retry-delay 10 --retry-connrefused --max-time 11 --insecure --output /dev/null $ENDPOINT/healthz"` | no |
+| [wait\_for\_cluster\_command](#input\_wait\_for\_cluster\_command) | `local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint URL is available as environment variable `ENDPOINT` | `string` | `"if test -n \"$ENDPOINT\"; then curl --silent --fail --retry 30 --retry-delay 10 --retry-connrefused --max-time 11 --insecure --output /dev/null $ENDPOINT/healthz; fi"` | no |
| [workers\_role\_arns](#input\_workers\_role\_arns) | List of Role ARNs of the worker nodes | `list(string)` | `[]` | no |
| [workers\_security\_group\_ids](#input\_workers\_security\_group\_ids) | DEPRECATED: Use `allowed_security_group_ids` instead.
Historical description: Security Group IDs of the worker nodes.
Historical default: `[]` | `list(string)` | `[]` | no |
@@ -145,11 +145,11 @@
| [eks\_cluster\_id](#output\_eks\_cluster\_id) | The name of the cluster |
| [eks\_cluster\_identity\_oidc\_issuer](#output\_eks\_cluster\_identity\_oidc\_issuer) | The OIDC Identity issuer for the cluster |
| [eks\_cluster\_identity\_oidc\_issuer\_arn](#output\_eks\_cluster\_identity\_oidc\_issuer\_arn) | The OIDC Identity issuer ARN for the cluster that can be used to associate IAM roles with a service account |
-| [eks\_cluster\_managed\_security\_group\_id](#output\_eks\_cluster\_managed\_security\_group\_id) | Security Group ID that was created by EKS for the cluster. EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads |
+| [eks\_cluster\_managed\_security\_group\_id](#output\_eks\_cluster\_managed\_security\_group\_id) | Security Group ID that was created by EKS for the cluster.
EKS creates a Security Group and applies it to the ENIs that are attached to EKS Control Plane master nodes and to any managed workloads. |
| [eks\_cluster\_role\_arn](#output\_eks\_cluster\_role\_arn) | ARN of the EKS cluster IAM role |
| [eks\_cluster\_version](#output\_eks\_cluster\_version) | The Kubernetes server version of the cluster |
| [kubernetes\_config\_map\_id](#output\_kubernetes\_config\_map\_id) | ID of `aws-auth` Kubernetes ConfigMap |
-| [security\_group\_arn](#output\_security\_group\_arn) | ARN of the created Security Group for the EKS cluster |
-| [security\_group\_id](#output\_security\_group\_id) | ID of the created Security Group for the EKS cluster |
-| [security\_group\_name](#output\_security\_group\_name) | Name of the created Security Group for the EKS cluster |
+| [security\_group\_arn](#output\_security\_group\_arn) | (Deprecated) ARN of the optionally created additional Security Group for the EKS cluster |
+| [security\_group\_id](#output\_security\_group\_id) | (Deprecated) ID of the optionally created additional Security Group for the EKS cluster |
+| [security\_group\_name](#output\_security\_group\_name) | Name of the optionally created additional Security Group for the EKS cluster |
diff --git a/examples/complete/fixtures.us-east-2.tfvars b/examples/complete/fixtures.us-east-2.tfvars
index e1250d6a..99f08246 100644
--- a/examples/complete/fixtures.us-east-2.tfvars
+++ b/examples/complete/fixtures.us-east-2.tfvars
@@ -9,7 +9,7 @@ stage = "test"
name = "eks"
# When updating the Kubernetes version, also update the API and client-go version in test/src/go.mod
-kubernetes_version = "1.21"
+kubernetes_version = "1.22"
oidc_provider_enabled = true
diff --git a/iam.tf b/iam.tf
index bb40bbe1..a2eb9382 100644
--- a/iam.tf
+++ b/iam.tf
@@ -1,7 +1,7 @@
locals {
create_eks_service_role = local.enabled && var.create_eks_service_role
- eks_service_role_arn = local.create_eks_service_role ? join("", aws_iam_role.default.*.arn) : var.eks_cluster_service_role_arn
+ eks_service_role_arn = local.create_eks_service_role ? one(aws_iam_role.default[*].arn) : var.eks_cluster_service_role_arn
}
data "aws_iam_policy_document" "assume_role" {
@@ -22,7 +22,7 @@ resource "aws_iam_role" "default" {
count = local.create_eks_service_role ? 1 : 0
name = module.label.id
- assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
+ assume_role_policy = one(data.aws_iam_policy_document.assume_role[*].json)
tags = module.label.tags
permissions_boundary = var.permissions_boundary
}
@@ -30,15 +30,15 @@ resource "aws_iam_role" "default" {
resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" {
count = local.create_eks_service_role ? 1 : 0
- policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", join("", data.aws_partition.current.*.partition))
- role = join("", aws_iam_role.default.*.name)
+ policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", one(data.aws_partition.current[*].partition))
+ role = one(aws_iam_role.default[*].name)
}
resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" {
count = local.create_eks_service_role ? 1 : 0
- policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", join("", data.aws_partition.current.*.partition))
- role = join("", aws_iam_role.default.*.name)
+ policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", one(data.aws_partition.current[*].partition))
+ role = one(aws_iam_role.default[*].name)
}
# AmazonEKSClusterPolicy managed policy doesn't contain all necessary permissions to create
@@ -77,12 +77,12 @@ resource "aws_iam_policy" "cluster_elb_service_role" {
count = local.create_eks_service_role ? 1 : 0
name = "${module.label.id}-ServiceRole"
- policy = join("", data.aws_iam_policy_document.cluster_elb_service_role.*.json)
+ policy = one(data.aws_iam_policy_document.cluster_elb_service_role[*].json)
}
resource "aws_iam_role_policy_attachment" "cluster_elb_service_role" {
count = local.create_eks_service_role ? 1 : 0
- policy_arn = aws_iam_policy.cluster_elb_service_role[0].arn
- role = join("", aws_iam_role.default.*.name)
+ policy_arn = one(aws_iam_policy.cluster_elb_service_role[*].arn)
+ role = one(aws_iam_role.default[*].name)
}
diff --git a/main.tf b/main.tf
index 55736ef0..f2ce2656 100644
--- a/main.tf
+++ b/main.tf
@@ -7,7 +7,7 @@ locals {
resources = var.cluster_encryption_config_resources
provider_key_arn = local.enabled && var.cluster_encryption_config_enabled && var.cluster_encryption_config_kms_key_id == "" ? (
- join("", aws_kms_key.cluster.*.arn)
+ one(aws_kms_key.cluster[*].arn)
) : var.cluster_encryption_config_kms_key_id
}
@@ -47,7 +47,7 @@ resource "aws_kms_key" "cluster" {
resource "aws_kms_alias" "cluster" {
count = local.enabled && var.cluster_encryption_config_enabled && var.cluster_encryption_config_kms_key_id == "" ? 1 : 0
name = format("alias/%v", module.label.id)
- target_key_id = join("", aws_kms_key.cluster.*.key_id)
+ target_key_id = one(aws_kms_key.cluster[*].key_id)
}
resource "aws_eks_cluster" "default" {
@@ -72,7 +72,7 @@ resource "aws_eks_cluster" "default" {
}
vpc_config {
- security_group_ids = var.create_security_group ? compact(concat(var.associated_security_group_ids, [join("", aws_security_group.default.*.id)])) : var.associated_security_group_ids
+ security_group_ids = var.create_security_group ? compact(concat(var.associated_security_group_ids, [one(aws_security_group.default[*].id)])) : var.associated_security_group_ids
subnet_ids = var.subnet_ids
endpoint_private_access = var.endpoint_private_access
#bridgecrew:skip=BC_AWS_KUBERNETES_2:Let user decide on public access
@@ -118,16 +118,16 @@ resource "aws_eks_cluster" "default" {
data "tls_certificate" "cluster" {
count = local.enabled && var.oidc_provider_enabled ? 1 : 0
- url = join("", aws_eks_cluster.default.*.identity.0.oidc.0.issuer)
+ url = one(aws_eks_cluster.default[*].identity.0.oidc.0.issuer)
}
resource "aws_iam_openid_connect_provider" "default" {
count = local.enabled && var.oidc_provider_enabled ? 1 : 0
- url = join("", aws_eks_cluster.default.*.identity.0.oidc.0.issuer)
+ url = one(aws_eks_cluster.default[*].identity.0.oidc.0.issuer)
tags = module.label.tags
client_id_list = ["sts.amazonaws.com"]
- thumbprint_list = [join("", data.tls_certificate.cluster.*.certificates.0.sha1_fingerprint)]
+ thumbprint_list = [one(data.tls_certificate.cluster[*].certificates.0.sha1_fingerprint)]
}
resource "aws_eks_addon" "cluster" {
@@ -136,7 +136,7 @@ resource "aws_eks_addon" "cluster" {
addon.addon_name => addon
} : {}
- cluster_name = join("", aws_eks_cluster.default.*.name)
+ cluster_name = one(aws_eks_cluster.default[*].name)
addon_name = each.key
addon_version = lookup(each.value, "addon_version", null)
resolve_conflicts = lookup(each.value, "resolve_conflicts", null)
diff --git a/outputs.tf b/outputs.tf
index 29d575af..febd4eef 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,46 +1,46 @@
output "security_group_id" {
- description = "ID of the created Security Group for the EKS cluster"
- value = join("", aws_security_group.default.*.id)
+ description = "(Deprecated) ID of the optionally created additional Security Group for the EKS cluster"
+ value = one(aws_security_group.default[*].id)
}
output "security_group_arn" {
- description = "ARN of the created Security Group for the EKS cluster"
- value = join("", aws_security_group.default.*.arn)
+ description = "(Deprecated) ARN of the optionally created additional Security Group for the EKS cluster"
+ value = one(aws_security_group.default[*].arn)
}
output "security_group_name" {
- description = "Name of the created Security Group for the EKS cluster"
- value = join("", aws_security_group.default.*.name)
+ description = "Name of the optionally created additional Security Group for the EKS cluster"
+ value = one(aws_security_group.default[*].name)
}
output "eks_cluster_id" {
description = "The name of the cluster"
- value = join("", aws_eks_cluster.default.*.id)
+ value = one(aws_eks_cluster.default[*].id)
}
output "eks_cluster_arn" {
description = "The Amazon Resource Name (ARN) of the cluster"
- value = join("", aws_eks_cluster.default.*.arn)
+ value = one(aws_eks_cluster.default[*].arn)
}
output "eks_cluster_endpoint" {
description = "The endpoint for the Kubernetes API server"
- value = join("", aws_eks_cluster.default.*.endpoint)
+ value = one(aws_eks_cluster.default[*].endpoint)
}
output "eks_cluster_version" {
description = "The Kubernetes server version of the cluster"
- value = join("", aws_eks_cluster.default.*.version)
+ value = one(aws_eks_cluster.default[*].version)
}
output "eks_cluster_identity_oidc_issuer" {
description = "The OIDC Identity issuer for the cluster"
- value = join("", aws_eks_cluster.default.*.identity.0.oidc.0.issuer)
+ value = one(aws_eks_cluster.default[*].identity.0.oidc.0.issuer)
}
output "eks_cluster_identity_oidc_issuer_arn" {
description = "The OIDC Identity issuer ARN for the cluster that can be used to associate IAM roles with a service account"
- value = join("", aws_iam_openid_connect_provider.default.*.arn)
+ value = one(aws_iam_openid_connect_provider.default[*].arn)
}
output "eks_cluster_certificate_authority_data" {
@@ -49,8 +49,11 @@ output "eks_cluster_certificate_authority_data" {
}
output "eks_cluster_managed_security_group_id" {
- description = "Security Group ID that was created by EKS for the cluster. EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads"
- value = join("", aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id)
+ description = <<-EOT
+ Security Group ID that was created by EKS for the cluster.
EKS creates a Security Group and applies it to the ENIs that are attached to EKS Control Plane master nodes and to any managed workloads.
+ EOT
+ value = one(aws_eks_cluster.default[*].vpc_config.0.cluster_security_group_id)
}
output "eks_cluster_role_arn" {
@@ -60,7 +63,7 @@ output "eks_cluster_role_arn" {
output "kubernetes_config_map_id" {
description = "ID of `aws-auth` Kubernetes ConfigMap"
- value = var.kubernetes_config_map_ignore_role_changes ? join("", kubernetes_config_map.aws_auth_ignore_changes.*.id) : join("", kubernetes_config_map.aws_auth.*.id)
+ value = var.kubernetes_config_map_ignore_role_changes ? one(kubernetes_config_map.aws_auth_ignore_changes[*].id) : one(kubernetes_config_map.aws_auth[*].id)
}
output "cluster_encryption_config_enabled" {
@@ -80,7 +83,7 @@ output "cluster_encryption_config_provider_key_arn" {
output "cluster_encryption_config_provider_key_alias" {
description = "Cluster Encryption Config KMS Key Alias ARN"
- value = join("", aws_kms_alias.cluster.*.arn)
+ value = one(aws_kms_alias.cluster[*].arn)
}
output "cloudwatch_log_group_name" {
diff --git a/security-group.tf b/security-group.tf
index 699b8eb4..e631a898 100644
--- a/security-group.tf
+++ b/security-group.tf
@@ -10,7 +10,7 @@ resource "aws_security_group_rule" "managed_ingress_security_groups" {
to_port = 65535
protocol = "-1"
source_security_group_id = local.allowed_security_group_ids[count.index]
- security_group_id = join("", aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id)
+ security_group_id = one(aws_eks_cluster.default[*].vpc_config.0.cluster_security_group_id)
type = "ingress"
}
@@ -22,7 +22,7 @@ resource "aws_security_group_rule" "managed_ingress_cidr_blocks" {
to_port = 65535
protocol = "-1"
cidr_blocks = var.allowed_cidr_blocks
- security_group_id = join("", aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id)
+ security_group_id = one(aws_eks_cluster.default[*].vpc_config.0.cluster_security_group_id)
type = "ingress"
}
@@ -51,7 +51,7 @@ resource "aws_security_group_rule" "egress" {
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
- security_group_id = join("", aws_security_group.default.*.id)
+ security_group_id = one(aws_security_group.default[*].id)
type = "egress"
}
@@ -63,7 +63,7 @@ resource "aws_security_group_rule" "ingress_workers" {
to_port = 65535
protocol = "-1"
source_security_group_id = var.workers_security_group_ids[count.index]
- security_group_id = join("", aws_security_group.default.*.id)
+ security_group_id = one(aws_security_group.default[*].id)
type = "ingress"
}
@@ -75,7 +75,7 @@ resource "aws_security_group_rule" "ingress_security_groups" {
to_port = 65535
protocol = "-1"
source_security_group_id = var.allowed_security_groups[count.index]
- security_group_id = join("", aws_security_group.default.*.id)
+ security_group_id = one(aws_security_group.default[*].id)
type = "ingress"
}
@@ -87,7 +87,7 @@ resource "aws_security_group_rule" "ingress_cidr_blocks" {
to_port = 65535
protocol = "-1"
cidr_blocks = var.allowed_cidr_blocks
- security_group_id = join("", aws_security_group.default.*.id)
+ security_group_id = one(aws_security_group.default[*].id)
type = "ingress"
}
@@ -100,6 +100,6 @@ resource "aws_security_group_rule" "custom_ingress_rules" {
to_port = each.value.to_port
protocol = each.value.protocol
source_security_group_id = each.value.source_security_group_id
- security_group_id = join("", aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id)
+ security_group_id = one(aws_eks_cluster.default[*].vpc_config.0.cluster_security_group_id)
type = "ingress"
}
diff --git a/test/src/go.mod b/test/src/go.mod
index 996f69b2..04165228 100644
--- a/test/src/go.mod
+++ b/test/src/go.mod
@@ -1,13 +1,13 @@
module github.com/cloudposse/terraform-aws-eks-cluster
-go 1.18
+go 1.19
require (
- github.com/aws/aws-sdk-go v1.44.62
- github.com/gruntwork-io/terratest v0.40.18
+ github.com/aws/aws-sdk-go v1.44.116
+ github.com/gruntwork-io/terratest v0.40.23
github.com/stretchr/testify v1.8.0
- k8s.io/api v0.22.1
- k8s.io/client-go v0.22.1
+ k8s.io/api v0.22.15
+ k8s.io/client-go v0.22.15
sigs.k8s.io/aws-iam-authenticator v0.5.9
)
@@ -79,7 +79,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apimachinery v0.22.1 // indirect
+ k8s.io/apimachinery v0.22.15 // indirect
k8s.io/klog/v2 v2.9.0 // indirect
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
diff --git a/test/src/go.sum b/test/src/go.sum
index 3762f3d2..3053a02f 100644
--- a/test/src/go.sum
+++ b/test/src/go.sum
@@ -74,8 +74,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
github.com/aws/aws-sdk-go v1.43.28/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go v1.44.62 h1:N8qOPnBhl2ZCIFiqyB640Xt5CeX9D8CEVhG/Vj7jGJU=
-github.com/aws/aws-sdk-go v1.44.62/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.116 h1:NpLIhcvLWXJZAEwvPj3TDHeqp7DleK6ZUVYyW01WNHY=
+github.com/aws/aws-sdk-go v1.44.116/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -250,8 +250,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmg
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/gruntwork-io/terratest v0.40.18 h1:xuFaHOf/7kwc5cQN+6FfbmKglneBKesZxPHgISgkUlc=
-github.com/gruntwork-io/terratest v0.40.18/go.mod h1:JGeIGgLbxbG9/Oqm06z6YXVr76CfomdmLkV564qov+8=
+github.com/gruntwork-io/terratest v0.40.23 h1:UKSJhrXfbyiaGOkQmqjTtbQsXi+9uSu3H8nrT9X1PGg=
+github.com/gruntwork-io/terratest v0.40.23/go.mod h1:JGeIGgLbxbG9/Oqm06z6YXVr76CfomdmLkV564qov+8=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
@@ -602,6 +602,7 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -923,12 +924,15 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
-k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
+k8s.io/api v0.22.15 h1:ersJ+vN3OD+3xutZ+T/VT8hHKpjfzFOftw05li3S2VI=
+k8s.io/api v0.22.15/go.mod h1:sJ9GkTzy/ni7FnB8Rf9o1TAHrlrUIiqD+bW/nlrjjHk=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
-k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
+k8s.io/apimachinery v0.22.15 h1:Ad8XfYmIwYVHznV2iNatXujRVTEOs0h2RCYm81Ql5EM=
+k8s.io/apimachinery v0.22.15/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU=
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
+k8s.io/client-go v0.22.15 h1:uQxFSlfgu/Ch0+zEIwZun8gEgp7LpiQRyo+DeIBgr6Y=
+k8s.io/client-go v0.22.15/go.mod h1:1qlC8gNGTaO6is6b7FfOvEAPiYOT2zf6YAANhe0u8CI=
k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -938,6 +942,7 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/sample-controller v0.22.1/go.mod h1:184Fa29md4PuQSEozdEw6n+AAmoodWOy9iCtyfCvAWY=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
diff --git a/variables.tf b/variables.tf
index 5046dc08..cf0a1fa0 100644
--- a/variables.tf
+++ b/variables.tf
@@ -139,7 +139,7 @@ variable "wait_for_cluster_command" {
description = "`local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint URL is available as environment variable `ENDPOINT`"
## --max-time is per attempt, --retry is the number of attempts
## Approx. total time limit is (max-time + retry-delay) * retry seconds
- default = "curl --silent --fail --retry 30 --retry-delay 10 --retry-connrefused --max-time 11 --insecure --output /dev/null $ENDPOINT/healthz"
+ default = "if test -n \"$ENDPOINT\"; then curl --silent --fail --retry 30 --retry-delay 10 --retry-connrefused --max-time 11 --insecure --output /dev/null $ENDPOINT/healthz; fi"
}
variable "kubernetes_config_map_ignore_role_changes" {
diff --git a/versions.tf b/versions.tf
index ec1b2c42..dfdacb7a 100644
--- a/versions.tf
+++ b/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 0.14.0"
+ required_version = ">= 1.0.0"
required_providers {
aws = {