diff --git a/.dev/dev_arm64.yaml b/.dev/dev_arm64.yaml
index b43e73bce..220140d3d 100644
--- a/.dev/dev_arm64.yaml
+++ b/.dev/dev_arm64.yaml
@@ -32,7 +32,8 @@ services:
       KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
 
   kafka0:
-    image: confluentinc/cp-kafka:7.2.1.arm64
+    image: confluentinc/cp-kafka:7.6.0.arm64
+    user: "0:0"
     hostname: kafka0
     container_name: kafka0
     ports:
@@ -56,12 +57,10 @@ services:
       KAFKA_JMX_PORT: 9997
       # KAFKA_JMX_HOSTNAME: localhost # uncomment this line and comment the next one if running with kafka-ui as a jar
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
-    volumes:
-      - ../documentation/compose/scripts/update_run.sh:/tmp/update_run.sh
-    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+      CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk'
 
   schema-registry0:
-    image: confluentinc/cp-schema-registry:7.2.1.arm64
+    image: confluentinc/cp-schema-registry:7.6.0.arm64
     ports:
       - 8085:8085
     depends_on:
@@ -77,7 +76,7 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
 
   kafka-connect0:
-    image: confluentinc/cp-kafka-connect:7.2.1.arm64
+    image: confluentinc/cp-kafka-connect:7.6.0.arm64
     ports:
       - 8083:8083
     depends_on:
@@ -102,7 +101,7 @@ services:
       CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/local/share/kafka/plugins,/usr/share/filestream-connectors"
 
   ksqldb0:
-    image: confluentinc/ksqldb-server:0.18.0
+    image: confluentinc/cp-ksqldb-server:7.6.0.arm64
     depends_on:
       - kafka0
       - kafka-connect0
@@ -120,7 +119,7 @@ services:
       KSQL_CACHE_MAX_BYTES_BUFFERING: 0
 
   kafka-init-topics:
-    image: confluentinc/cp-kafka:7.2.1.arm64
+    image: confluentinc/cp-kafka:7.6.0.arm64
     volumes:
       - ../documentation/compose/data/message.json:/data/message.json
     depends_on:
diff --git a/api/src/main/java/io/kafbat/ui/service/rbac/extractor/OauthAuthorityExtractor.java b/api/src/main/java/io/kafbat/ui/service/rbac/extractor/OauthAuthorityExtractor.java
index a4458e64b..6d14ab870 100644
--- a/api/src/main/java/io/kafbat/ui/service/rbac/extractor/OauthAuthorityExtractor.java
+++ b/api/src/main/java/io/kafbat/ui/service/rbac/extractor/OauthAuthorityExtractor.java
@@ -76,7 +76,7 @@ private Set<String> extractRoles(AccessControlService acs, DefaultOAuth2User pri
     var rolesFieldName = provider.getCustomParams().get(ROLES_FIELD_PARAM_NAME);
 
     if (rolesFieldName == null) {
-      log.warn("Provider [{}] doesn't contain a roles field param name, won't map roles", provider);
+      log.warn("Provider [{}] doesn't contain a roles field param name, won't map roles", provider.getClientName());
       return Collections.emptySet();
     }
 
diff --git a/documentation/compose/scripts/clusterID b/documentation/compose/scripts/clusterID
deleted file mode 100644
index 4417a5a68..000000000
--- a/documentation/compose/scripts/clusterID
+++ /dev/null
@@ -1 +0,0 @@
-zlFiTJelTOuhnklFwLWixw
\ No newline at end of file
diff --git a/documentation/compose/scripts/create_cluster_id.sh b/documentation/compose/scripts/create_cluster_id.sh
deleted file mode 100644
index e921e836c..000000000
--- a/documentation/compose/scripts/create_cluster_id.sh
+++ /dev/null
@@ -1 +0,0 @@
-kafka-storage random-uuid > /workspace/kafbat-ui/documentation/compose/clusterID
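Note: the deleted clusterID file and create_cluster_id.sh above are superseded by the static CLUSTER_ID environment variable added to kafka0, which the cp-kafka 7.6.0 image consumes directly when formatting KRaft storage on first start. If a fresh ID is ever needed, the kafka-storage tool bundled in the image can mint one; a minimal sketch, assuming the same image tag used in this compose file:

# Mint a new base64-encoded KRaft cluster ID to paste into CLUSTER_ID.
docker run --rm confluentinc/cp-kafka:7.6.0.arm64 kafka-storage random-uuid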
diff --git a/documentation/compose/scripts/update_run.sh b/documentation/compose/scripts/update_run.sh
deleted file mode 100755
index 023c832b4..000000000
--- a/documentation/compose/scripts/update_run.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-# This script is required to run kafka cluster (without zookeeper)
-#!/bin/sh
-
-# Docker workaround: Remove check for KAFKA_ZOOKEEPER_CONNECT parameter
-sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
-
-# Docker workaround: Ignore cub zk-ready
-sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
-
-# KRaft required step: Format the storage directory with a new cluster ID
-echo "kafka-storage format --ignore-formatted -t $(kafka-storage random-uuid) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
\ No newline at end of file
diff --git a/documentation/compose/scripts/update_run_cluster.sh b/documentation/compose/scripts/update_run_cluster.sh
deleted file mode 100644
index 31da333aa..000000000
--- a/documentation/compose/scripts/update_run_cluster.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-# This script is required to run kafka cluster (without zookeeper)
-#!/bin/sh
-
-# Docker workaround: Remove check for KAFKA_ZOOKEEPER_CONNECT parameter
-sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
-
-# Docker workaround: Ignore cub zk-ready
-sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
-
-# KRaft required step: Format the storage directory with a new cluster ID
-echo "kafka-storage format --ignore-formatted -t $(cat /tmp/clusterID) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
\ No newline at end of file
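Note: both update_run scripts existed only to sed-patch the Confluent entrypoint (dropping the KAFKA_ZOOKEEPER_CONNECT check and the cub zk-ready wait) and to append a kafka-storage format step. With CLUSTER_ID set, the 7.6.0 images handle KRaft startup without any patching. For reference, a minimal sketch of the manual step the scripts automated, reusing the flags and paths from the deleted lines:

# Format KRaft storage once with a generated cluster ID
# (--ignore-formatted makes this a no-op on an already-formatted volume).
CLUSTER_ID="$(kafka-storage random-uuid)"
kafka-storage format --ignore-formatted -t "$CLUSTER_ID" -c /etc/kafka/kafka.properties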