diff --git a/bitnami/kafka/README.md b/bitnami/kafka/README.md index 2babdbb6654542..e74db74aa9fcb7 100644 --- a/bitnami/kafka/README.md +++ b/bitnami/kafka/README.md @@ -553,317 +553,349 @@ You can enable this initContainer by setting `volumePermissions.enabled` to `tru ### Controller-eligible statefulset parameters -| Name | Description | Value | -| -------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | -| `controller.replicaCount` | Number of Kafka controller-eligible nodes | `3` | -| `controller.controllerOnly` | If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. | `false` | -| `controller.minId` | Minimal node.id values for controller-eligible nodes. Do not change after first initialization. | `0` | -| `controller.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` | -| `controller.config` | Configuration file for Kafka controller-eligible nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` | -| `controller.existingConfigmap` | ConfigMap with Kafka Configuration for controller-eligible nodes. | `""` | -| `controller.extraConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` | -| `controller.secretConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` | -| `controller.existingSecretConfig` | Secret with additonal configuration that will be appended to the end of the generated Kafka controller-eligible nodes configuration file | `""` | -| `controller.heapOpts` | Kafka Java Heap size for controller-eligible nodes | `-Xmx1024m -Xms1024m` | -| `controller.command` | Override Kafka container command | `[]` | -| `controller.args` | Override Kafka container arguments | `[]` | -| `controller.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | -| `controller.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | -| `controller.extraEnvVarsSecret` | Secret with extra environment variables | `""` | -| `controller.extraContainerPorts` | Kafka controller-eligible extra containerPorts. 
| `[]` | -| `controller.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | -| `controller.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | -| `controller.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `controller.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `controller.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `controller.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `controller.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | -| `controller.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `controller.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `controller.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `controller.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `controller.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `controller.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | -| `controller.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `controller.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `controller.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `controller.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `controller.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `controller.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `controller.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `controller.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `controller.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | -| `controller.initContainerResources.limits` | The resources limits for the init container | `{}` | -| `controller.initContainerResources.requests` | The requested resources for the init container | `{}` | -| `controller.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). 
| `small` | -| `controller.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `controller.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `controller.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `controller.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `controller.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `controller.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | -| `controller.podSecurityContext.seccompProfile.type` | Set Kafka pods's Security Context seccomp profile | `RuntimeDefault` | -| `controller.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | -| `controller.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `controller.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `controller.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | -| `controller.containerSecurityContext.runAsGroup` | Set Kafka containers' Security Context runAsGroup | `1001` | -| `controller.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | -| `controller.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | -| `controller.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` | -| `controller.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` | -| `controller.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `controller.hostAliases` | Kafka pods host aliases | `[]` | -| `controller.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | -| `controller.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | -| `controller.podLabels` | Extra labels for Kafka pods | `{}` | -| `controller.podAnnotations` | Extra annotations for Kafka pods | `{}` | -| `controller.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `controller.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `controller.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `controller.nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | -| `controller.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | -| `controller.affinity` | Affinity for pod assignment | `{}` | -| `controller.nodeSelector` | Node labels for pod assignment | `{}` | -| `controller.tolerations` | Tolerations for pod assignment | `[]` | -| `controller.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | -| `controller.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | -| `controller.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | -| `controller.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | -| `controller.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | -| `controller.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | -| `controller.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | -| `controller.schedulerName` | Name of the k8s scheduler (other than default) | `""` | -| `controller.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | -| `controller.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | -| `controller.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | -| `controller.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | -| `controller.initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | -| `controller.pdb.create` | Deploy a pdb object for the Kafka pod | `false` | -| `controller.pdb.minAvailable` | Maximum number/percentage of unavailable Kafka replicas | `""` | -| `controller.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | -| `controller.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | -| `controller.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `controller.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | -| `controller.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `controller.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | -| `controller.persistence.annotations` | Annotations for the PVC | `{}` | -| `controller.persistence.labels` | Labels for the PVC | `{}` | -| `controller.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `controller.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | -| `controller.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | -| `controller.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `controller.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | -| `controller.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `controller.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | -| `controller.logPersistence.annotations` | Annotations for the PVC | `{}` | -| `controller.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `controller.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | +| Name | Description | Value | +| -------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `controller.replicaCount` | Number of Kafka controller-eligible nodes | `3` | +| `controller.controllerOnly` | If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. | `false` | +| `controller.minId` | Minimal node.id values for controller-eligible nodes. Do not change after first initialization. | `0` | +| `controller.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` | +| `controller.config` | Configuration file for Kafka controller-eligible nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` | +| `controller.existingConfigmap` | ConfigMap with Kafka Configuration for controller-eligible nodes. | `""` | +| `controller.extraConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` | +| `controller.secretConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` | +| `controller.existingSecretConfig` | Secret with additonal configuration that will be appended to the end of the generated Kafka controller-eligible nodes configuration file | `""` | +| `controller.heapOpts` | Kafka Java Heap size for controller-eligible nodes | `-Xmx1024m -Xms1024m` | +| `controller.command` | Override Kafka container command | `[]` | +| `controller.args` | Override Kafka container arguments | `[]` | +| `controller.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `controller.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `controller.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `controller.extraContainerPorts` | Kafka controller-eligible extra containerPorts. 
| `[]` | +| `controller.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `controller.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `controller.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `controller.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `controller.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `controller.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `controller.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `controller.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `controller.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `controller.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `controller.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `controller.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `controller.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `controller.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `controller.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `controller.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `controller.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `controller.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `controller.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `controller.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `controller.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `controller.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `controller.initContainerResources.limits` | The resources limits for the init container | `{}` | +| `controller.initContainerResources.requests` | The requested resources for the init container | `{}` | +| `controller.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). 
| `small` |
+| `controller.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
+| `controller.podSecurityContext.enabled` | Enable security context for the pods | `true` |
+| `controller.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
+| `controller.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
+| `controller.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
+| `controller.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` |
+| `controller.podSecurityContext.seccompProfile.type` | Set Kafka pod's Security Context seccomp profile | `RuntimeDefault` |
+| `controller.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` |
+| `controller.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
+| `controller.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
+| `controller.containerSecurityContext.runAsGroup` | Set Kafka containers' Security Context runAsGroup | `1001` |
+| `controller.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` |
+| `controller.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` |
+| `controller.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the root filesystem as read-only | `true` |
+| `controller.containerSecurityContext.capabilities.drop` | Set Kafka containers' Security Context capabilities to be dropped | `["ALL"]` |
+| `controller.automountServiceAccountToken` | Mount Service Account token in pod | `false` |
+| `controller.hostAliases` | Kafka pods host aliases | `[]` |
+| `controller.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` |
+| `controller.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` |
+| `controller.podLabels` | Extra labels for Kafka pods | `{}` |
+| `controller.podAnnotations` | Extra annotations for Kafka pods | `{}` |
+| `controller.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `controller.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `controller.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `controller.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
+| `controller.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `controller.affinity` | Affinity for pod assignment | `{}` |
+| `controller.nodeSelector` | Node labels for pod assignment | `{}` |
+| `controller.tolerations` | Tolerations for pod assignment | `[]` |
+| `controller.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `controller.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` |
+| `controller.podManagementPolicy` | StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` |
+| `controller.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
+| `controller.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` |
+| `controller.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` |
+| `controller.enableServiceLinks` | Whether information about services should be injected into pod's environment variables | `true` |
+| `controller.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
+| `controller.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` |
+| `controller.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` |
+| `controller.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` |
+| `controller.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` |
+| `controller.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` |
+
+### Experimental: Kafka Controller Autoscaling configuration
+
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `controller.autoscaling.vpa.enabled` | Enable VPA | `false` |
+| `controller.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` |
+| `controller.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `controller.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `controller.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `controller.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `controller.autoscaling.hpa.enabled` | Enable HPA for Kafka Controller | `false` | +| `controller.autoscaling.hpa.minReplicas` | Minimum number of Kafka Controller replicas | `""` | +| `controller.autoscaling.hpa.maxReplicas` | Maximum number of Kafka Controller replicas | `""` | +| `controller.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `controller.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `controller.pdb.create` | Deploy a pdb object for the Kafka pod | `false` | +| `controller.pdb.minAvailable` | Maximum number/percentage of unavailable Kafka replicas | `""` | +| `controller.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | +| `controller.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `controller.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `controller.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `controller.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `controller.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `controller.persistence.annotations` | Annotations for the PVC | `{}` | +| `controller.persistence.labels` | Labels for the PVC | `{}` | +| `controller.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `controller.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `controller.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `controller.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `controller.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `controller.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `controller.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `controller.logPersistence.annotations` | Annotations for the PVC | `{}` | +| `controller.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `controller.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | ### Broker-only statefulset parameters -| Name | Description | Value | -| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | -| `broker.replicaCount` | Number of Kafka broker-only nodes | `0` | -| `broker.minId` | Minimal node.id values for broker-only nodes. 
Do not change after first initialization. | `100` | -| `broker.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` | -| `broker.config` | Configuration file for Kafka broker-only nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` | -| `broker.existingConfigmap` | ConfigMap with Kafka Configuration for broker-only nodes. | `""` | -| `broker.extraConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` | -| `broker.secretConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` | -| `broker.existingSecretConfig` | Secret with additonal configuration that will be appended to the end of the generated Kafka broker-only nodes configuration file | `""` | -| `broker.heapOpts` | Kafka Java Heap size for broker-only nodes | `-Xmx1024m -Xms1024m` | -| `broker.command` | Override Kafka container command | `[]` | -| `broker.args` | Override Kafka container arguments | `[]` | -| `broker.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | -| `broker.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | -| `broker.extraEnvVarsSecret` | Secret with extra environment variables | `""` | -| `broker.extraContainerPorts` | Kafka broker-only extra containerPorts. | `[]` | -| `broker.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | -| `broker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | -| `broker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `broker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `broker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `broker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `broker.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | -| `broker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `broker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `broker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `broker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `broker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `broker.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | -| `broker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `broker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `broker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `broker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `broker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `broker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `broker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `broker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `broker.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | -| `broker.initContainerResources.limits` | The resources limits for the container | `{}` | -| 
`broker.initContainerResources.requests` | The requested resources for the container | `{}` | -| `broker.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production). | `small` | -| `broker.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `broker.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `broker.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `broker.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `broker.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `broker.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | -| `broker.podSecurityContext.seccompProfile.type` | Set Kafka pod's Security Context seccomp profile | `RuntimeDefault` | -| `broker.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | -| `broker.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `broker.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `broker.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | -| `broker.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | -| `broker.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | -| `broker.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` | -| `broker.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` | -| `broker.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `broker.hostAliases` | Kafka pods host aliases | `[]` | -| `broker.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | -| `broker.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | -| `broker.podLabels` | Extra labels for Kafka pods | `{}` | -| `broker.podAnnotations` | Extra annotations for Kafka pods | `{}` | -| `broker.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `broker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `broker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `broker.nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | -| `broker.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | -| `broker.affinity` | Affinity for pod assignment | `{}` | -| `broker.nodeSelector` | Node labels for pod assignment | `{}` | -| `broker.tolerations` | Tolerations for pod assignment | `[]` | -| `broker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | -| `broker.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | -| `broker.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | -| `broker.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | -| `broker.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | -| `broker.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | -| `broker.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | -| `broker.schedulerName` | Name of the k8s scheduler (other than default) | `""` | -| `broker.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | -| `broker.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | -| `broker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | -| `broker.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | -| `broker.initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | -| `broker.pdb.create` | Deploy a pdb object for the Kafka pod | `false` | -| `broker.pdb.minAvailable` | Maximum number/percentage of unavailable Kafka replicas | `""` | -| `broker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | -| `broker.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | -| `broker.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `broker.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | -| `broker.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `broker.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | -| `broker.persistence.annotations` | Annotations for the PVC | `{}` | -| `broker.persistence.labels` | Labels for the PVC | `{}` | -| `broker.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `broker.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | -| `broker.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | -| `broker.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `broker.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | -| `broker.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `broker.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | -| `broker.logPersistence.annotations` | Annotations for the PVC | `{}` | -| `broker.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` |
-| `broker.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` |
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `broker.replicaCount` | Number of Kafka broker-only nodes | `0` |
+| `broker.minId` | Minimal node.id values for broker-only nodes. Do not change after first initialization. | `100` |
+| `broker.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` |
+| `broker.config` | Configuration file for Kafka broker-only nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` |
+| `broker.existingConfigmap` | ConfigMap with Kafka Configuration for broker-only nodes. | `""` |
+| `broker.extraConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` |
+| `broker.secretConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` |
+| `broker.existingSecretConfig` | Secret with additional configuration that will be appended to the end of the generated Kafka broker-only nodes configuration file | `""` |
+| `broker.heapOpts` | Kafka Java Heap size for broker-only nodes | `-Xmx1024m -Xms1024m` |
+| `broker.command` | Override Kafka container command | `[]` |
+| `broker.args` | Override Kafka container arguments | `[]` |
+| `broker.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` |
+| `broker.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` |
+| `broker.extraEnvVarsSecret` | Secret with extra environment variables | `""` |
+| `broker.extraContainerPorts` | Kafka broker-only extra containerPorts. 
| `[]` | +| `broker.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `broker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `broker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `broker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `broker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `broker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `broker.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `broker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `broker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `broker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `broker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `broker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `broker.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `broker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `broker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `broker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `broker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `broker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `broker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `broker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `broker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `broker.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `broker.initContainerResources.limits` | The resources limits for the container | `{}` | +| `broker.initContainerResources.requests` | The requested resources for the container | `{}` | +| `broker.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production). 
| `small` |
+| `broker.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
+| `broker.podSecurityContext.enabled` | Enable security context for the pods | `true` |
+| `broker.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
+| `broker.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
+| `broker.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
+| `broker.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` |
+| `broker.podSecurityContext.seccompProfile.type` | Set Kafka pod's Security Context seccomp profile | `RuntimeDefault` |
+| `broker.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` |
+| `broker.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
+| `broker.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
+| `broker.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
+| `broker.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` |
+| `broker.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` |
+| `broker.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the root filesystem as read-only | `true` |
+| `broker.containerSecurityContext.capabilities.drop` | Set Kafka containers' Security Context capabilities to be dropped | `["ALL"]` |
+| `broker.automountServiceAccountToken` | Mount Service Account token in pod | `false` |
+| `broker.hostAliases` | Kafka pods host aliases | `[]` |
+| `broker.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` |
+| `broker.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` |
+| `broker.podLabels` | Extra labels for Kafka pods | `{}` |
+| `broker.podAnnotations` | Extra annotations for Kafka pods | `{}` |
+| `broker.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `broker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `broker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `broker.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
+| `broker.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `broker.affinity` | Affinity for pod assignment | `{}` |
+| `broker.nodeSelector` | Node labels for pod assignment | `{}` |
+| `broker.tolerations` | Tolerations for pod assignment | `[]` |
+| `broker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `broker.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` |
+| `broker.podManagementPolicy` | StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` |
+| `broker.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
+| `broker.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` |
+| `broker.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` |
+| `broker.enableServiceLinks` | Whether information about services should be injected into pod's environment variables | `true` |
+| `broker.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
+| `broker.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` |
+| `broker.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` |
+| `broker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` |
+| `broker.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` |
+| `broker.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` |
+| `broker.pdb.create` | Deploy a pdb object for the Kafka pod | `false` |
+| `broker.pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` |
+| `broker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` |
+
+### Experimental: Kafka Broker Autoscaling configuration
+
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `broker.autoscaling.vpa.enabled` | Enable VPA | `false` |
+| `broker.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` |
+| `broker.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `broker.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `broker.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `broker.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `broker.autoscaling.hpa.enabled` | Enable HPA for Kafka Broker | `false` | +| `broker.autoscaling.hpa.minReplicas` | Minimum number of Kafka Broker replicas | `""` | +| `broker.autoscaling.hpa.maxReplicas` | Maximum number of Kafka Broker replicas | `""` | +| `broker.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `broker.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `broker.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `broker.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `broker.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `broker.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `broker.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `broker.persistence.annotations` | Annotations for the PVC | `{}` | +| `broker.persistence.labels` | Labels for the PVC | `{}` | +| `broker.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `broker.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `broker.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `broker.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `broker.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `broker.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `broker.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `broker.logPersistence.annotations` | Annotations for the PVC | `{}` | +| `broker.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `broker.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | ### Traffic Exposure parameters -| Name | Description | Value | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | -| `service.type` | Kubernetes Service type | `ClusterIP` | -| `service.ports.client` | Kafka svc port for client connections | `9092` | -| `service.ports.controller` | Kafka svc port for controller connections. 
It is used if "kraft.enabled: true" | `9093` | -| `service.ports.interbroker` | Kafka svc port for inter-broker connections | `9094` | -| `service.ports.external` | Kafka svc port for external connections | `9095` | -| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | -| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | -| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | -| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `service.clusterIP` | Kafka service Cluster IP | `""` | -| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | -| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | -| `service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | -| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | -| `service.annotations` | Additional custom annotations for Kafka service | `{}` | -| `service.headless.controller.annotations` | Annotations for the controller-eligible headless service. | `{}` | -| `service.headless.controller.labels` | Labels for the controller-eligible headless service. | `{}` | -| `service.headless.broker.annotations` | Annotations for the broker-only headless service. | `{}` | -| `service.headless.broker.labels` | Labels for the broker-only headless service. | `{}` | -| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | -| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | -| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `REGISTRY_NAME` | -| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `REPOSITORY_NAME/kubectl` | -| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | -| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | -| `externalAccess.autoDiscovery.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if externalAccess.autoDiscovery.resources is set (externalAccess.autoDiscovery.resources is recommended for production). 
| `nano` | -| `externalAccess.autoDiscovery.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `externalAccess.autoDiscovery.containerSecurityContext.enabled` | Enable Kafka auto-discovery containers' Security Context | `true` | -| `externalAccess.autoDiscovery.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `externalAccess.autoDiscovery.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `externalAccess.autoDiscovery.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | -| `externalAccess.autoDiscovery.containerSecurityContext.runAsNonRoot` | Set Kafka auto-discovery containers' Security Context runAsNonRoot | `true` | -| `externalAccess.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka auto-discovery containers' Security Context allowPrivilegeEscalation | `false` | -| `externalAccess.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka auto-discovery containers' Security Context readOnlyRootFilesystem | `true` | -| `externalAccess.autoDiscovery.containerSecurityContext.capabilities.drop` | Set Kafka auto-discovery containers' Security Context capabilities to be dropped | `["ALL"]` | -| `externalAccess.autoDiscovery.containerSecurityContext.seccompProfile.type` | Set Kafka auto-discovery seccomp profile type | `RuntimeDefault` | -| `externalAccess.controller.forceExpose` | If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes | `false` | -| `externalAccess.controller.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | -| `externalAccess.controller.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | -| `externalAccess.controller.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.controller.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | -| `externalAccess.controller.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | -| `externalAccess.controller.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | -| `externalAccess.controller.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | -| `externalAccess.controller.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | -| `externalAccess.controller.service.labels` | Service labels for external access | `{}` | -| `externalAccess.controller.service.annotations` | Service annotations for external access | `{}` | -| `externalAccess.controller.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | -| `externalAccess.broker.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | -| `externalAccess.broker.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | -| `externalAccess.broker.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.broker.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | -| `externalAccess.broker.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | -| `externalAccess.broker.service.usePodIPs` | using the MY_POD_IP address for external access. | `false` | -| `externalAccess.broker.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | -| `externalAccess.broker.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | -| `externalAccess.broker.service.labels` | Service labels for external access | `{}` | -| `externalAccess.broker.service.annotations` | Service annotations for external access | `{}` | -| `externalAccess.broker.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | -| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | -| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolice | `[]` | -| `networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | -| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | -| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| Name | Description | Value | +| -------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | Kafka svc port for client connections | `9092` | +| `service.ports.controller` | Kafka svc port for controller connections. It is used if "kraft.enabled: true" | `9093` | +| `service.ports.interbroker` | Kafka svc port for inter-broker connections | `9094` | +| `service.ports.external` | Kafka svc port for external connections | `9095` | +| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | +| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | +| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | Kafka service Cluster IP | `""` | +| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | +| `service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Kafka service | `{}` | +| `service.headless.controller.annotations` | Annotations for the controller-eligible headless service. | `{}` | +| `service.headless.controller.labels` | Labels for the controller-eligible headless service. | `{}` | +| `service.headless.broker.annotations` | Annotations for the broker-only headless service. | `{}` | +| `service.headless.broker.labels` | Labels for the broker-only headless service. | `{}` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `REGISTRY_NAME` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `REPOSITORY_NAME/kubectl` | +| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if externalAccess.autoDiscovery.resources is set (externalAccess.autoDiscovery.resources is recommended for production). | `nano` | +| `externalAccess.autoDiscovery.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `externalAccess.autoDiscovery.containerSecurityContext.enabled` | Enable Kafka auto-discovery containers' Security Context | `true` | +| `externalAccess.autoDiscovery.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `externalAccess.autoDiscovery.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `externalAccess.autoDiscovery.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `externalAccess.autoDiscovery.containerSecurityContext.runAsNonRoot` | Set Kafka auto-discovery containers' Security Context runAsNonRoot | `true` | +| `externalAccess.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka auto-discovery containers' Security Context allowPrivilegeEscalation | `false` | +| `externalAccess.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka auto-discovery containers' Security Context readOnlyRootFilesystem | `true` | +| `externalAccess.autoDiscovery.containerSecurityContext.capabilities.drop` | Set Kafka auto-discovery containers' Security Context capabilities to be dropped | `["ALL"]` | +| `externalAccess.autoDiscovery.containerSecurityContext.seccompProfile.type` | Set Kafka auto-discovery seccomp profile type | `RuntimeDefault` | +| `externalAccess.controller.forceExpose` | If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes | `false` | +| `externalAccess.controller.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.controller.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.controller.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.controller.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `externalAccess.controller.service.nodePorts` | Array of node ports used for each Kafka broker. 
Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.controller.service.usePodIPs` | using the MY_POD_IP address for external access. | `false` | +| `externalAccess.controller.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.controller.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.controller.service.labels` | Service labels for external access | `{}` | +| `externalAccess.controller.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.controller.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `externalAccess.broker.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.broker.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.broker.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.broker.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `externalAccess.broker.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.broker.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` |
+| `externalAccess.broker.service.domain` | Domain or external IP used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` |
+| `externalAccess.broker.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` |
+| `externalAccess.broker.service.labels` | Service labels for external access | `{}` |
+| `externalAccess.broker.service.annotations` | Service annotations for external access | `{}` |
+| `externalAccess.broker.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` |
+| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of ports and all destinations. | `true` |
+| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
+| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` |
+| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
+| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` |

### Volume Permissions parameters

-| Name | Description | Value |
-| ------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
-| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
-| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` |
-| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` |
-| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
-| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
-| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
-| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
| `nano` | -| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | +| Name | Description | Value | +| ----------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `nano` | +| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | ### Other Parameters @@ -877,191 +909,191 @@ You can enable this initContainer by setting `volumePermissions.enabled` to `tru ### Metrics parameters -| Name | Description | Value | -| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | -| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | -| `metrics.kafka.image.registry` | Kafka exporter image registry | `REGISTRY_NAME` | -| `metrics.kafka.image.repository` | Kafka exporter image repository | `REPOSITORY_NAME/kafka-exporter` | -| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | -| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | -| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | -| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | -| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | -| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | -| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | -| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | -| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | -| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | -| `metrics.kafka.livenessProbe.enabled` | Enable livenessProbe | `true` | -| `metrics.kafka.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | -| `metrics.kafka.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `metrics.kafka.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | -| `metrics.kafka.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `metrics.kafka.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `metrics.kafka.readinessProbe.enabled` | Enable readinessProbe | `true` | -| `metrics.kafka.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `metrics.kafka.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | -| `metrics.kafka.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | -| `metrics.kafka.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | -| `metrics.kafka.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `metrics.kafka.startupProbe.enabled` | Enable startupProbe | `false` | -| `metrics.kafka.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | -| `metrics.kafka.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | -| `metrics.kafka.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `metrics.kafka.startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | -| `metrics.kafka.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `metrics.kafka.customStartupProbe` | Override default startup probe | `{}` | -| `metrics.kafka.customLivenessProbe` | Override default liveness probe | `{}` | -| `metrics.kafka.customReadinessProbe` | Override default readiness probe | `{}` | -| `metrics.kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.kafka.resources is set (metrics.kafka.resources is recommended for production). 
| `micro` | -| `metrics.kafka.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `metrics.kafka.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `metrics.kafka.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `metrics.kafka.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | -| `metrics.kafka.podSecurityContext.seccompProfile.type` | Set Kafka exporter pod's Security Context seccomp profile | `RuntimeDefault` | -| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | -| `metrics.kafka.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `metrics.kafka.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `metrics.kafka.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | -| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | -| `metrics.kafka.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka exporter containers' Security Context allowPrivilegeEscalation | `false` | -| `metrics.kafka.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka exporter containers' Security Context readOnlyRootFilesystem | `true` | -| `metrics.kafka.containerSecurityContext.capabilities.drop` | Set Kafka exporter containers' Security Context capabilities to be dropped | `["ALL"]` | -| `metrics.kafka.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | -| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | -| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | -| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.kafka.affinity` is set. | `""` | -| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. 
| `[]` | -| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | -| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | -| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | -| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | -| `metrics.kafka.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | -| `metrics.kafka.priorityClassName` | Kafka exporter pods' priorityClassName | `""` | -| `metrics.kafka.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | -| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | -| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | -| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | -| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | -| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | -| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | -| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | -| `metrics.kafka.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | -| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | -| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | -| `metrics.jmx.kafkaJmxPort` | JMX port where the exporter will collect metrics, exposed in the Kafka container. | `5555` | -| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` | -| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` | -| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | -| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | -| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `metrics.jmx.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | -| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | -| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation | `false` | -| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem | `true` | -| `metrics.jmx.containerSecurityContext.capabilities.drop` | Set Prometheus JMX exporter containers' Security Context capabilities to be dropped | `["ALL"]` | -| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | -| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). | `micro` | -| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | -| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | -| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | -| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | -| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | -| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | -| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | -| `metrics.serviceMonitor.metricRelabelings` | 
MetricRelabelConfigs to apply to samples before ingestion | `[]` | -| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | -| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | -| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | -| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | -| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | -| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | +| Name | Description | Value | +| ----------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `REGISTRY_NAME` | +| `metrics.kafka.image.repository` | Kafka exporter image repository | `REPOSITORY_NAME/kafka-exporter` | +| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | +| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | +| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | +| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | +| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | +| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | +| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | +| `metrics.kafka.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.kafka.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `metrics.kafka.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.kafka.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `metrics.kafka.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.kafka.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.kafka.readinessProbe.enabled` | Enable readinessProbe 
| `true` | +| `metrics.kafka.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.kafka.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `metrics.kafka.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.kafka.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.kafka.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.kafka.startupProbe.enabled` | Enable startupProbe | `false` | +| `metrics.kafka.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `metrics.kafka.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `metrics.kafka.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.kafka.startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `metrics.kafka.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.kafka.customStartupProbe` | Override default startup probe | `{}` | +| `metrics.kafka.customLivenessProbe` | Override default liveness probe | `{}` | +| `metrics.kafka.customReadinessProbe` | Override default readiness probe | `{}` | +| `metrics.kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.kafka.resources is set (metrics.kafka.resources is recommended for production). | `micro` | +| `metrics.kafka.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.kafka.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `metrics.kafka.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `metrics.kafka.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | +| `metrics.kafka.podSecurityContext.seccompProfile.type` | Set Kafka exporter pod's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | +| `metrics.kafka.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `metrics.kafka.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.kafka.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.kafka.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka exporter containers' Security Context allowPrivilegeEscalation | `false` | +| `metrics.kafka.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka exporter containers' Security Context readOnlyRootFilesystem | `true` | +| `metrics.kafka.containerSecurityContext.capabilities.drop` | Set Kafka exporter containers' Security Context capabilities to be dropped | `["ALL"]` | +| `metrics.kafka.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `metrics.kafka.hostAliases` | Kafka exporter 
pods host aliases | `[]` | +| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | +| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | +| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.kafka.affinity` is set. | `""` | +| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. | `[]` | +| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | +| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | +| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | +| `metrics.kafka.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `metrics.kafka.priorityClassName` | Kafka exporter pods' priorityClassName | `""` | +| `metrics.kafka.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | +| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | +| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | +| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | +| `metrics.kafka.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.kafkaJmxPort` | JMX port where the exporter will collect metrics, exposed in the Kafka container. | `5555` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` |
+| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` |
+| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
+| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` |
+| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
+| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
+| `metrics.jmx.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
+| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` |
+| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation | `false` |
+| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem | `true` |
+| `metrics.jmx.containerSecurityContext.capabilities.drop` | Set Prometheus JMX exporter containers' Security Context capabilities to be dropped | `["ALL"]` |
+| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` |
+| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). | `micro` |
+| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
+| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` |
+| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
+| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` |
+| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose via JMX stats to the JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` |
+| `metrics.jmx.config` | Configuration file for JMX exporter | `""` |
+| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` |
+| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` |
+| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` |
MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | ### Kafka provisioning parameters -| Name | Description | Value | -| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------- | -| `provisioning.enabled` | Enable kafka provisioning Job | `false` | -| `provisioning.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | -| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | -| `provisioning.topics` | Kafka topics to provision | `[]` | -| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | -| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | -| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | -| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | -| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | -| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | -| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM`. | `jks` | -| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | -| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | -| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | -| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | -| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | -| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | -| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. 
| `""` | -| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | -| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | -| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | -| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | -| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | -| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. | `""` | -| `provisioning.command` | Override provisioning container command | `[]` | -| `provisioning.args` | Override provisioning container arguments | `[]` | -| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | -| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | -| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | -| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | -| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | -| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `true` | -| `provisioning.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | -| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | -| `provisioning.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production). 
| `micro` | -| `provisioning.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `provisioning.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `provisioning.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `provisioning.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | -| `provisioning.podSecurityContext.seccompProfile.type` | Set Kafka provisioning pod's Security Context seccomp profile | `RuntimeDefault` | -| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | -| `provisioning.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `provisioning.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `provisioning.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | -| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | -| `provisioning.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka provisioning containers' Security Context allowPrivilegeEscalation | `false` | -| `provisioning.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka provisioning containers' Security Context readOnlyRootFilesystem | `true` | -| `provisioning.containerSecurityContext.capabilities.drop` | Set Kafka provisioning containers' Security Context capabilities to be dropped | `["ALL"]` | -| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | -| `provisioning.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | -| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | -| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | -| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | -| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | -| `provisioning.waitForKafka` | If true use an init container to wait until kafka is ready before starting provisioning | `true` | -| `provisioning.useHelmHooks` | Flag to indicate usage of helm hooks | `true` | +| Name | Description | Value | +| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `provisioning.enabled` | Enable kafka provisioning Job | `false` | +| `provisioning.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | +| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | +| 
`provisioning.topics` | Kafka topics to provision | `[]` | +| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | +| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | +| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | +| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | +| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM`. | `jks` | +| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | +| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | +| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | +| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | +| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | +| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | +| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. | `""` | +| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | +| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | +| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | +| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. 
| `""` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | +| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `true` | +| `provisioning.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `provisioning.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production). | `micro` | +| `provisioning.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `provisioning.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `provisioning.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.podSecurityContext.seccompProfile.type` | Set Kafka provisioning pod's Security Context seccomp profile | `RuntimeDefault` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `provisioning.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka provisioning containers' Security Context allowPrivilegeEscalation | `false` | +| `provisioning.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka provisioning containers' Security Context readOnlyRootFilesystem | `true` | +| `provisioning.containerSecurityContext.capabilities.drop` | Set Kafka provisioning containers' Security Context capabilities to be dropped | `["ALL"]` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | +| `provisioning.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| 
+| `provisioning.extraVolumeMounts` | Optionally specify an extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional init containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.waitForKafka` | If true, use an init container to wait until Kafka is ready before starting provisioning | `true` | +| `provisioning.useHelmHooks` | Flag to indicate usage of Helm hooks | `true` | ### KRaft chart parameters diff --git a/bitnami/kafka/templates/broker/hpa.yaml b/bitnami/kafka/templates/broker/hpa.yaml new file mode 100644 index 00000000000000..318921d9c96fcd --- /dev/null +++ b/bitnami/kafka/templates/broker/hpa.yaml @@ -0,0 +1,51 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if and (gt $replicaCount 0) .Values.broker.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-broker" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if or .Values.broker.autoscaling.hpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.autoscaling.hpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-broker" (include "common.names.fullname" .) }} + minReplicas: {{ .Values.broker.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.broker.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.broker.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.broker.autoscaling.hpa.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.broker.autoscaling.hpa.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.broker.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.broker.autoscaling.hpa.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.broker.autoscaling.hpa.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/kafka/templates/broker/vpa.yaml b/bitnami/kafka/templates/broker/vpa.yaml new file mode 100644 index 00000000000000..9308bf142ebb2d --- /dev/null +++ b/bitnami/kafka/templates/broker/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if and (gt $replicaCount 0) (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.broker.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ printf "%s-broker" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if or .Values.broker.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: kafka + {{- with .Values.broker.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.broker.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.broker.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ printf "%s-broker" (include "common.names.fullname" .) }} + {{- if .Values.broker.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.broker.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/kafka/templates/controller-eligible/hpa.yaml b/bitnami/kafka/templates/controller-eligible/hpa.yaml new file mode 100644 index 00000000000000..dfc18ed94ecea9 --- /dev/null +++ b/bitnami/kafka/templates/controller-eligible/hpa.yaml @@ -0,0 +1,51 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- if and .Values.kraft.enabled (gt $replicaCount 0) .Values.controller.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-controller" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller + app.kubernetes.io/part-of: kafka + {{- if or .Values.controller.autoscaling.hpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.autoscaling.hpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-controller" (include "common.names.fullname" .) 
}} + minReplicas: {{ .Values.controller.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.controller.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.controller.autoscaling.hpa.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.controller.autoscaling.hpa.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.controller.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.controller.autoscaling.hpa.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.controller.autoscaling.hpa.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/kafka/templates/controller-eligible/vpa.yaml b/bitnami/kafka/templates/controller-eligible/vpa.yaml new file mode 100644 index 00000000000000..55c1ece51b5f14 --- /dev/null +++ b/bitnami/kafka/templates/controller-eligible/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- if and .Values.kraft.enabled (gt $replicaCount 0) (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.controller.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ printf "%s-controller" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller + app.kubernetes.io/part-of: kafka + {{- if or .Values.controller.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: kafka + {{- with .Values.controller.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ printf "%s-controller" (include "common.names.fullname" .) }} + {{- if .Values.controller.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.controller.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . 
}} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/kafka/values.yaml b/bitnami/kafka/values.yaml index dbb0d552531f48..ca29c9d8dbdeef 100644 --- a/bitnami/kafka/values.yaml +++ b/bitnami/kafka/values.yaml @@ -791,6 +791,49 @@ controller: ## containerPort: 1234 ## initContainers: [] + ## @section Experimental: Kafka Controller Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param controller.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param controller.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param controller.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param controller.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param controller.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param controller.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param controller.autoscaling.hpa.enabled Enable HPA for Kafka Controller + ## + enabled: false + ## @param controller.autoscaling.hpa.minReplicas Minimum number of Kafka Controller replicas + ## + minReplicas: "" + ## @param controller.autoscaling.hpa.maxReplicas Maximum number of Kafka Controller replicas + ## + maxReplicas: "" + ## @param controller.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param controller.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" ## Kafka Pod Disruption Budget ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ ## @param controller.pdb.create Deploy a pdb object for the Kafka pod @@ -1211,6 +1254,49 @@ broker: create: false minAvailable: "" maxUnavailable: 1 + ## @section Experimental: Kafka Broker Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param broker.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param broker.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param broker.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param broker.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param broker.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param broker.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
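+ ## Illustrative note (standard Kubernetes VPA semantics, not chart-specific behaviour): "Auto" and "Recreate" allow the autoscaler to evict broker pods in order to apply new resource requests, "Initial" only applies recommendations when pods are (re)created, and "Off" produces recommendations without acting on them.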
+ ## + updateMode: Auto + hpa: + ## @param broker.autoscaling.hpa.enabled Enable HPA for Kafka Broker + ## + enabled: false + ## @param broker.autoscaling.hpa.minReplicas Minimum number of Kafka Broker replicas + ## + minReplicas: "" + ## @param broker.autoscaling.hpa.maxReplicas Maximum number of Kafka Broker replicas + ## + maxReplicas: "" + ## @param broker.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param broker.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ##
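The autoscaling values introduced above are marked *Experimental* in `values.yaml`. As a rough sketch of how they could be combined (the value names come from this chart; the numbers are placeholders rather than tuned recommendations), a user-supplied values file might enable an HPA for the controller-eligible nodes and a recommendation-only VPA for the brokers:

```yaml
# example-autoscaling-values.yaml (illustrative sketch only)
controller:
  autoscaling:
    hpa:
      enabled: true        # renders templates/controller-eligible/hpa.yaml (requires kraft.enabled and controller.replicaCount > 0)
      minReplicas: 3
      maxReplicas: 6
      targetCPU: 75        # target average CPU utilization percentage
broker:
  replicaCount: 1          # broker autoscaling objects only render when broker.replicaCount > 0
  autoscaling:
    vpa:
      enabled: true        # the VPA object is only created if the autoscaling.k8s.io/v1 VerticalPodAutoscaler API is available
      updatePolicy:
        updateMode: "Off"  # recommendations only; no pod eviction
```

Pass such a file with `-f` on `helm install`/`helm upgrade`. As a general caution, the Kubernetes VPA documentation advises against combining a VPA in `Auto` mode with an HPA that scales on the same CPU/memory metrics for the same workload.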